module RL

export run_rl

using Base: OneTo
using ReinforcementLearning
using Flux: InvDecay
using Intervals
using StaticArrays: SVector
using LoopVectorization: @turbo
using Random: Random
using ProgressMeter: @showprogress

using ..ReCo: ReCo, Particle, angle2, Shape

const INITIAL_REWARD = 0.0
const INITIAL_STATE_IND = 1

function angle_state_space(n_angle_states::Int64)
    angle_range = range(; start=-π, stop=π, length=n_angle_states + 1)

    angle_state_space = Vector{Interval}(undef, n_angle_states)

    @simd for i in 1:n_angle_states
        if i == 1
            bound = Closed
        else
            bound = Open
        end

        angle_state_space[i] = Interval{Float64,bound,Closed}(
            angle_range[i], angle_range[i + 1]
        )
    end

    return angle_state_space
end

mutable struct Env <: AbstractEnv
    n_actions::Int64
    action_space::Vector{SVector{2,Float64}}
    action_ind_space::OneTo{Int64}
    distance_state_space::Vector{Interval}
    direction_angle_state_space::Vector{Interval}
    n_states::Int64
    state_space::Vector{SVector{2,Interval}}
    state_ind_space::OneTo{Int64}
    state_ind::Int64
    reward::Float64
    terminated::Bool

    function Env(;
        max_distance::Float64,
        min_distance::Float64=0.0,
        n_v_actions::Int64=2,
        n_ω_actions::Int64=3,
        max_v::Float64=40.0,
        max_ω::Float64=π / 2,
        n_distance_states::Int64=3,
        n_direction_angle_states::Int64=3,
    )
        @assert min_distance >= 0.0
        @assert max_distance > min_distance
        @assert n_v_actions > 1
        @assert n_ω_actions > 1
        @assert max_v > 0
        @assert max_ω > 0
        @assert n_distance_states > 1
        @assert n_direction_angle_states > 1

        v_action_space = range(; start=0.0, stop=max_v, length=n_v_actions)
        ω_action_space = range(; start=-max_ω, stop=max_ω, length=n_ω_actions)

        n_actions = n_v_actions * n_ω_actions

        action_space = Vector{SVector{2,Float64}}(undef, n_actions)

        ind = 1
        for v in v_action_space
            for ω in ω_action_space
                action_space[ind] = SVector(v, ω)
                ind += 1
            end
        end

        action_ind_space = OneTo(n_actions)

        distance_range = range(;
            start=min_distance, stop=max_distance, length=n_distance_states + 1
        )

        distance_state_space = Vector{Interval}(undef, n_distance_states)

        @simd for i in 1:n_distance_states
            if i == 1
                bound = Closed
            else
                bound = Open
            end

            distance_state_space[i] = Interval{Float64,bound,Closed}(
                distance_range[i], distance_range[i + 1]
            )
        end

        direction_angle_state_space = angle_state_space(n_direction_angle_states)

        n_states = n_distance_states * n_direction_angle_states + 1

        state_space = Vector{SVector{2,Interval}}(undef, n_states - 1)

        ind = 1
        for distance_state in distance_state_space
            for direction_angle_state in direction_angle_state_space
                state_space[ind] = SVector(distance_state, direction_angle_state)
                ind += 1
            end
        end
        # Last state is when no particle is in the skin radius

        state_ind_space = OneTo(n_states)

        return new(
            n_actions,
            action_space,
            action_ind_space,
            distance_state_space,
            direction_angle_state_space,
            n_states,
            state_space,
            state_ind_space,
            INITIAL_STATE_IND,
            INITIAL_REWARD,
            false,
        )
    end
end

function reset!(env::Env)
    env.state_ind = env.n_states
    env.terminated = false

    return nothing
end

RLBase.state_space(env::Env) = env.state_ind_space
RLBase.state(env::Env) = env.state_ind
RLBase.action_space(env::Env) = env.action_ind_space
RLBase.reward(env::Env) = env.reward
RLBase.is_terminated(env::Env) = env.terminated

struct Params{H<:AbstractHook}
    env::Env
    agent::Agent
    hook::H
    old_states_ind::Vector{Int64}
    states_ind::Vector{Int64}
    actions::Vector{SVector{2,Float64}}
    actions_ind::Vector{Int64}
    n_steps_before_actions_update::Int64
    goal_gyration_tensor_eigvals_ratio::Float64
    n_particles::Int64
    max_distance::Float64
    vec_to_neighbour_sums::Vector{SVector{2,Float64}}
    n_neighbours::Vector{Int64}

    function Params(
        env::Env,
        agent::Agent,
        hook::H,
        n_steps_before_actions_update::Int64,
        goal_gyration_tensor_eigvals_ratio::Float64,
        n_particles::Int64,
        max_distance::Float64,
    ) where {H<:AbstractHook}
        n_states = env.n_states

        return new{H}(
            env,
            agent,
            hook,
            fill(0, n_particles),
            fill(n_states, n_particles),
            fill(SVector(0.0, 0.0), n_particles),
            fill(0, n_particles),
            n_steps_before_actions_update,
            goal_gyration_tensor_eigvals_ratio,
            n_particles,
            max_distance,
            fill(SVector(0.0, 0.0), n_particles),
            fill(0, n_particles),
        )
    end
end

# Reset the neighbour accumulators before the pair interactions of an integration step
function pre_integration_hook(rl_params::Params)
    @simd for id in 1:(rl_params.n_particles)
        rl_params.vec_to_neighbour_sums[id] = SVector(0.0, 0.0)
        rl_params.n_neighbours[id] = 0
    end

    return nothing
end

# Accumulate the pair vector r⃗₁₂ and the neighbour count for both particles of a pair
function state_update_helper_hook(
    rl_params::Params, id1::Int64, id2::Int64, r⃗₁₂::SVector{2,Float64}
)
    rl_params.vec_to_neighbour_sums[id1] += r⃗₁₂
    rl_params.vec_to_neighbour_sums[id2] -= r⃗₁₂

    rl_params.n_neighbours[id1] += 1
    rl_params.n_neighbours[id2] += 1

    return nothing
end

function find_state_ind(state::S, state_space::Vector{S}) where {S<:SVector{2,Interval}}
    return findfirst(x -> x == state, state_space)
end

function find_state_interval(value::Float64, state_space::Vector{Interval})::Interval
    for state in state_space
        if value in state
            return state
        end
    end
end

# Map every particle to its discretized state: distance and direction angle relative to
# the local center of mass of its neighbours
function state_update_hook(rl_params::Params, particles::Vector{Particle})
    @turbo for id in 1:(rl_params.n_particles)
        rl_params.old_states_ind[id] = rl_params.states_ind[id]
    end

    env = rl_params.env

    for id in 1:(rl_params.n_particles)
        n_neighbours = rl_params.n_neighbours[id]

        if n_neighbours == 0
            state_ind = env.n_states
        else
            vec_to_local_center_of_mass =
                rl_params.vec_to_neighbour_sums[id] / n_neighbours

            distance = sqrt(
                vec_to_local_center_of_mass[1]^2 + vec_to_local_center_of_mass[2]^2
            )

            distance_state = find_state_interval(distance, env.distance_state_space)

            si, co = sincos(particles[id].φ)
            direction_angle = angle2(SVector(co, si), vec_to_local_center_of_mass)

            direction_angle_state = find_state_interval(
                direction_angle, env.direction_angle_state_space
            )

            state = SVector{2,Interval}(distance_state, direction_angle_state)
            state_ind = find_state_ind(state, env.state_space)
        end

        rl_params.states_ind[id] = state_ind
    end

    return nothing
end

function get_env_agent_hook(rl_params::Params)
    return (rl_params.env, rl_params.agent, rl_params.hook)
end

# Reward is the negative squared distance to the local center of mass, normalized
function update_reward!(env::Env, rl_params::Params, particle::Particle)
    id = particle.id

    normalization = (rl_params.max_distance * rl_params.n_particles)

    n_neighbours = rl_params.n_neighbours[id]

    if n_neighbours == 0
        env.reward = -(rl_params.max_distance^2) / normalization
    else
        vec_to_local_center_of_mass = rl_params.vec_to_neighbour_sums[id] / n_neighbours
        # TODO: Reuse vec_to_local_center_of_mass from state_update_hook

        env.reward =
            -(vec_to_local_center_of_mass[1]^2 + vec_to_local_center_of_mass[2]^2) /
            normalization
    end

    return nothing
end

function update_table_and_actions_hook(
    rl_params::Params, particle::Particle, first_integration_step::Bool
)
    env, agent, hook = get_env_agent_hook(rl_params)

    id = particle.id

    if !first_integration_step
        # Old state
        env.state_ind = rl_params.old_states_ind[id]

        action_ind = rl_params.actions_ind[id]

        # Pre act
        agent(PRE_ACT_STAGE, env, action_ind)
        hook(PRE_ACT_STAGE, agent, env, action_ind)

        # Update to current state
        env.state_ind = rl_params.states_ind[id]

        # Update reward
        update_reward!(env, rl_params, particle)

        # Post act
        agent(POST_ACT_STAGE, env)
        hook(POST_ACT_STAGE, agent, env)
    end

    # Update action
    action_ind = agent(env)
    action = env.action_space[action_ind]

    rl_params.actions[id] = action
    rl_params.actions_ind[id] = action_ind

    return nothing
end

act_hook(::Nothing, args...) = nothing

function act_hook(
    rl_params::Params, particle::Particle, δt::Float64, si::Float64, co::Float64
)
    # Apply action
    action = rl_params.actions[particle.id]

    vδt = action[1] * δt

    particle.tmp_c += SVector(vδt * co, vδt * si)
    particle.φ += action[2] * δt

    return nothing
end

# Tabular Q agent with a Monte Carlo learner and a linearly decaying ε-greedy explorer
function gen_agent(n_states::Int64, n_actions::Int64, ϵ_stable::Float64)
    # TODO: Optimize warmup and decay
    warmup_steps = 200_000
    decay_steps = 1_000_000

    policy = QBasedPolicy(;
        learner=MonteCarloLearner(;
            approximator=TabularQApproximator(;
                n_state=n_states, n_action=n_actions, opt=InvDecay(1.0)
            ),
        ),
        explorer=EpsilonGreedyExplorer(;
            kind=:linear,
            ϵ_init=1.0,
            ϵ_stable=ϵ_stable,
            warmup_steps=warmup_steps,
            decay_steps=decay_steps,
        ),
    )

    return Agent(; policy=policy, trajectory=VectorSARTTrajectory())
end

function run_rl(;
    goal_gyration_tensor_eigvals_ratio::Float64,
    n_episodes::Int64=200,
    episode_duration::Float64=50.0,
    update_actions_at::Float64=0.1,
    n_particles::Int64=100,
    seed::Int64=42,
    ϵ_stable::Float64=0.0001,
    parent_dir::String="",
)
    @assert 0.0 <= goal_gyration_tensor_eigvals_ratio <= 1.0
    @assert n_episodes > 0
    @assert episode_duration > 0
    @assert update_actions_at in 0.001:0.001:episode_duration
    @assert n_particles > 0
    @assert 0.0 < ϵ_stable < 1.0

    # Setup
    Random.seed!(seed)

    sim_consts = ReCo.gen_sim_consts(
        n_particles, 0.0; skin_to_interaction_r_ratio=2.0, packing_ratio=0.22
    )
    n_particles = sim_consts.n_particles

    max_distance = sim_consts.skin_r
    env = Env(; max_distance=max_distance)

    agent = gen_agent(env.n_states, env.n_actions, ϵ_stable)

    n_steps_before_actions_update = round(Int64, update_actions_at / sim_consts.δt)

    hook = TotalRewardPerEpisode()

    rl_params = Params(
        env,
        agent,
        hook,
        n_steps_before_actions_update,
        goal_gyration_tensor_eigvals_ratio,
        n_particles,
        max_distance,
    )

    parent_dir = "RL" * parent_dir

    # Pre experiment
    hook(PRE_EXPERIMENT_STAGE, agent, env)
    agent(PRE_EXPERIMENT_STAGE, env)

    @showprogress 0.6 for episode in 1:n_episodes
        dir = ReCo.init_sim_with_sim_consts(sim_consts; parent_dir=parent_dir)

        # Reset
        reset!(env)

        # Pre episode
        hook(PRE_EPISODE_STAGE, agent, env)
        agent(PRE_EPISODE_STAGE, env)

        # Episode
        ReCo.run_sim(
            dir;
            duration=episode_duration,
            seed=rand(1:typemax(Int64)),
            rl_params=rl_params,
        )

        env.terminated = true

        # Post episode
        hook(POST_EPISODE_STAGE, agent, env)
        agent(POST_EPISODE_STAGE, env)

        # TODO: Replace with live plot
        display(hook.rewards)
        display(agent.policy.explorer.step)
    end

    # Post experiment
    hook(POST_EXPERIMENT_STAGE, agent, env)

    return rl_params
end

end # module
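
# A minimal usage sketch (not part of the module). It assumes `RL` is loaded as a
# submodule of the `ReCo` package, as `using ..ReCo` above suggests, so `run_rl` is
# reachable as `ReCo.RL.run_rl`. The keyword values are illustrative; only
# `goal_gyration_tensor_eigvals_ratio` has no default in the signature above.
#
# using ReCo
#
# rl_params = ReCo.RL.run_rl(;
#     goal_gyration_tensor_eigvals_ratio=0.3,
#     n_episodes=10,
#     episode_duration=50.0,
#     n_particles=100,
# )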