module RL

export run_rl

using ReinforcementLearning
using Flux: InvDecay
using Intervals
using StaticArrays: SVector
using LoopVectorization: @turbo
using Random: Random
using ProgressMeter: @showprogress

using ..ReCo: ReCo, Particle, angle2, center_of_mass

const INITIAL_REWARD = 0.0

mutable struct Env <: AbstractEnv
    n_actions::Int64
    action_space::Vector{SVector{2,Float64}}
    action_ind_space::Vector{Int64}

    distance_state_space::Vector{Interval}
    angle_state_space::Vector{Interval}
    n_states::Int64
    state_space::Vector{SVector{2,Interval}}
    state_ind_space::Vector{Int64}
    state_ind::Int64

    reward::Float64
    terminated::Bool

    center_of_mass::SVector{2,Float64}

    function Env(
        min_distance::Float64,
        max_distance::Float64;
        n_v_actions::Int64=3,
        n_ω_actions::Int64=5,
        max_v::Float64=60.0,
        max_ω::Float64=π / 2,
        n_distance_states::Int64=3,
        n_angle_states::Int64=4,
    )
        @assert min_distance > 0.0
        @assert max_distance > min_distance
        @assert n_v_actions > 1
        @assert n_ω_actions > 1
        @assert max_v > 0
        @assert max_ω > 0

        v_action_space = 0.0:(max_v / (n_v_actions - 1)):max_v
        ω_action_space = (-max_ω):(2 * max_ω / (n_ω_actions - 1)):max_ω

        n_actions = n_v_actions * n_ω_actions

        action_space = Vector{SVector{2,Float64}}(undef, n_actions)

        ind = 1
        for v in v_action_space
            for ω in ω_action_space
                action_space[ind] = SVector(v, ω)
                ind += 1
            end
        end

        action_ind_space = collect(1:n_actions)

        distance_range =
            min_distance:((max_distance - min_distance) / n_distance_states):max_distance

        distance_state_space = Vector{Interval}(undef, n_distance_states)

        @simd for i in 1:n_distance_states
            if i == 1
                bound = Closed
            else
                bound = Open
            end

            distance_state_space[i] = Interval{Float64,bound,Closed}(
                distance_range[i], distance_range[i + 1]
            )
        end

        angle_range = (-π):(2 * π / n_angle_states):π

        angle_state_space = Vector{Interval}(undef, n_angle_states)

        @simd for i in 1:n_angle_states
            if i == 1
                bound = Closed
            else
                bound = Open
            end

            angle_state_space[i] = Interval{Float64,bound,Closed}(
                angle_range[i], angle_range[i + 1]
            )
        end

        n_states = n_distance_states * n_angle_states + 1

        state_space = Vector{SVector{2,Interval}}(undef, n_states - 1)

        ind = 1
        for distance_state in distance_state_space
            for angle_state in angle_state_space
                state_space[ind] = SVector(distance_state, angle_state)
                ind += 1
            end
        end
        # Last state is SVector(nothing, nothing)

        state_ind_space = collect(1:n_states)

        # initial_state = SVector(nothing, nothing)
        initial_state_ind = n_states

        return new(
            n_actions,
            action_space,
            action_ind_space,
            distance_state_space,
            angle_state_space,
            n_states,
            state_space,
            state_ind_space,
            initial_state_ind,
            INITIAL_REWARD,
            false,
            SVector(0.0, 0.0),
        )
    end
end

function reset!(env::Env)
    env.state_ind = env.n_states
    env.reward = INITIAL_REWARD
    env.terminated = false

    return nothing
end

RLBase.state_space(env::Env) = env.state_ind_space
RLBase.state(env::Env) = env.state_ind
RLBase.action_space(env::Env) = env.action_ind_space
RLBase.reward(env::Env) = env.reward
RLBase.is_terminated(env::Env) = env.terminated

struct Params{H<:AbstractHook}
    env::Env
    agent::Agent
    hook::H

    old_states_ind::Vector{Int64}
    states_ind::Vector{Int64}

    actions::Vector{SVector{2,Float64}}
    actions_ind::Vector{Int64}

    n_steps_before_actions_update::Int64

    goal_shape_ratio::Float64

    n_particles::Int64

    min_sq_distances::Vector{Float64}
    vecs_r⃗₁₂_to_min_distance_particle::Vector{SVector{2,Float64}}

    half_box_len::Float64
    max_elliptic_distance::Float64

    function Params(
        env::Env,
        agent::Agent,
        hook::H,
        n_steps_before_actions_update::Int64,
        goal_shape_ratio::Float64,
        n_particles::Int64,
        half_box_len::Float64,
    ) where {H<:AbstractHook}
        max_elliptic_distance = sqrt(2) * half_box_len
        n_states = env.n_states

        return new{H}(
            env,
            agent,
            hook,
            fill(0, n_particles),
            fill(n_states, n_particles),
            fill(SVector(0.0, 0.0), n_particles),
            fill(0, n_particles),
            n_steps_before_actions_update,
            goal_shape_ratio,
            n_particles,
            fill(Inf64, n_particles),
            fill(SVector(0.0, 0.0), n_particles),
            half_box_len,
            max_elliptic_distance,
        )
    end
end

# Reset the minimal squared neighbor distances before a new integration step.
function pre_integration_hook(rl_params::Params)
    @turbo for i in 1:(rl_params.n_particles)
        rl_params.min_sq_distances[i] = Inf64
    end

    return nothing
end

# For both particles of an interacting pair, keep track of the squared distance
# and displacement vector to the nearest neighbor found so far.
function state_update_helper_hook(
    rl_params::Params, id1::Int64, id2::Int64, r⃗₁₂::SVector{2,Float64}, distance²::Float64
)
    if rl_params.min_sq_distances[id1] > distance²
        rl_params.min_sq_distances[id1] = distance²
        rl_params.vecs_r⃗₁₂_to_min_distance_particle[id1] = r⃗₁₂
    end

    if rl_params.min_sq_distances[id2] > distance²
        rl_params.min_sq_distances[id2] = distance²
        rl_params.vecs_r⃗₁₂_to_min_distance_particle[id2] = -r⃗₁₂
    end

    return nothing
end

function get_state_ind(state::S, state_space::Vector{S}) where {S<:SVector{2,Interval}}
    return findfirst(x -> x == state, state_space)
end

# Map each particle's nearest-neighbor distance and relative angle to a discrete
# state index. Particles without a neighbor in range get the last state.
function state_update_hook(rl_params::Params, particles::Vector{Particle})
    @turbo for id in 1:(rl_params.n_particles)
        rl_params.old_states_ind[id] = rl_params.states_ind[id]
    end

    env = rl_params.env
    n_states = env.n_states

    env_angle_state = env.angle_state_space[1]
    state_space = env.state_space

    for id in 1:(rl_params.n_particles)
        env_distance_state::Union{Interval,Nothing} = nothing

        min_sq_distance = rl_params.min_sq_distances[id]
        min_distance = sqrt(min_sq_distance)

        if !isinf(min_sq_distance)
            for distance_state in env.distance_state_space
                if min_distance in distance_state
                    env_distance_state = distance_state
                    break
                end
            end
        end

        # Default: the (nothing, nothing) state
        state_ind = n_states

        if !isnothing(env_distance_state)
            r⃗₁₂ = rl_params.vecs_r⃗₁₂_to_min_distance_particle[id]
            si, co = sincos(particles[id].φ)

            #=
            Angle between the two vectors r⃗₁₂ and e = (co, si):
                angle = acos(dot(r⃗₁₂, e) / (norm(r⃗₁₂) * norm(e)))
            with norm(r⃗₁₂) == min_distance and norm(e) == 1.
            min_distance is finite here, because otherwise env_distance_state
            would be nothing and this branch would not be entered.
            =#
            angle = angle2(SVector(co, si), r⃗₁₂)

            for angle_state in env.angle_state_space
                if angle in angle_state
                    env_angle_state = angle_state
                end
            end

            state = SVector{2,Interval}(env_distance_state, env_angle_state)
            state_ind = get_state_ind(state, state_space)
        end

        rl_params.states_ind[id] = state_ind
    end

    env.center_of_mass = center_of_mass(particles, rl_params.half_box_len)

    return nothing
end

function get_env_agent_hook(rl_params::Params)
    return (rl_params.env, rl_params.agent, rl_params.hook)
end

# Run the agent's pre-act and post-act stages for the previous transition of the
# given particle, then let the agent choose a new action for it.
function update_table_and_actions_hook(
    rl_params::Params, particle::Particle, first_integration_step::Bool
)
    env, agent, hook = get_env_agent_hook(rl_params)

    id = particle.id

    if !first_integration_step
        # Old state
        env.state_ind = rl_params.old_states_ind[id]

        action_ind = rl_params.actions_ind[id]

        # Pre act
        agent(PRE_ACT_STAGE, env, action_ind)
        hook(PRE_ACT_STAGE, agent, env, action_ind)

        # Update to current state
        env.state_ind = rl_params.states_ind[id]

        # Update reward: negative squared distance of the particle to the
        # center of mass, scaled by max_elliptic_distance and n_particles
        vec_to_center_of_mass = ReCo.minimum_image(
            particle.c - env.center_of_mass, rl_params.half_box_len
        )
        env.reward =
            -(vec_to_center_of_mass[1]^2 + vec_to_center_of_mass[2]^2) /
            rl_params.max_elliptic_distance / rl_params.n_particles

        # Post act
        agent(POST_ACT_STAGE, env)
        hook(POST_ACT_STAGE, agent, env)
    end

    # Update action
    action_ind = agent(env)
    action = env.action_space[action_ind]

    rl_params.actions[id] = action
    rl_params.actions_ind[id] = action_ind

    return nothing
end

act_hook(::Nothing, args...) = nothing

# Apply the particle's current action (translation speed and angular velocity)
# during the integration step.
function act_hook(
    rl_params::Params, particle::Particle, δt::Float64, si::Float64, co::Float64
)
    # Apply action
    action = rl_params.actions[particle.id]

    vδt = action[1] * δt

    particle.tmp_c += SVector(vδt * co, vδt * si)
    particle.φ += action[2] * δt

    return nothing
end

# Tabular Monte Carlo Q-learning agent with ϵ-greedy exploration.
function gen_agent(n_states::Int64, n_actions::Int64, ϵ::Float64)
    policy = QBasedPolicy(;
        learner=MonteCarloLearner(;
            approximator=TabularQApproximator(;
                n_state=n_states, n_action=n_actions, opt=InvDecay(1.0)
            ),
        ),
        explorer=EpsilonGreedyExplorer(ϵ),
    )

    return Agent(; policy=policy, trajectory=VectorSARTTrajectory())
end

function run_rl(;
    goal_shape_ratio::Float64,
    n_episodes::Int64=200,
    episode_duration::Float64=50.0,
    update_actions_at::Float64=0.1,
    n_particles::Int64=100,
    seed::Int64=42,
    ϵ::Float64=0.01,
    parent_dir::String="",
)
    @assert 0.0 <= goal_shape_ratio <= 1.0
    @assert n_episodes > 0
    @assert episode_duration > 0
    @assert update_actions_at in 0.001:0.001:episode_duration
    @assert n_particles > 0
    @assert 0.0 < ϵ < 1.0

    # Setup
    Random.seed!(seed)

    sim_consts = ReCo.gen_sim_consts(
        n_particles, 0.0; skin_to_interaction_r_ratio=1.8, packing_ratio=0.15
    )
    n_particles = sim_consts.n_particles

    env = Env(sim_consts.particle_radius, sim_consts.skin_r)

    agent = gen_agent(env.n_states, env.n_actions, ϵ)

    n_steps_before_actions_update = round(Int64, update_actions_at / sim_consts.δt)

    hook = TotalRewardPerEpisode()

    rl_params = Params(
        env,
        agent,
        hook,
        n_steps_before_actions_update,
        goal_shape_ratio,
        n_particles,
        sim_consts.half_box_len,
    )

    parent_dir = "RL" * parent_dir

    # Pre experiment
    hook(PRE_EXPERIMENT_STAGE, agent, env)
    agent(PRE_EXPERIMENT_STAGE, env)

    @showprogress 0.6 for episode in 1:n_episodes
        dir = ReCo.init_sim_with_sim_consts(sim_consts; parent_dir=parent_dir)

        # Reset
        reset!(env)

        # Pre episode
        hook(PRE_EPISODE_STAGE, agent, env)
        agent(PRE_EPISODE_STAGE, env)

        # Episode
        ReCo.run_sim(
            dir; duration=episode_duration, seed=rand(1:typemax(Int64)), rl_params=rl_params
        )

        env.terminated = true

        # Post episode
        hook(POST_EPISODE_STAGE, agent, env)
        agent(POST_EPISODE_STAGE, env)

        display(hook.rewards)
    end

    # Post experiment
    hook(POST_EXPERIMENT_STAGE, agent, env)

    return rl_params
end

end # module
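#=
Usage sketch (an illustrative example, not part of the module): assuming the
package is loaded and this module is reachable as `ReCo.RL`, a training run
could look like the following. The keyword values shown are the defaults of
`run_rl`, except for `goal_shape_ratio`, which has no default.

    using ReCo

    rl_params = ReCo.RL.run_rl(;
        goal_shape_ratio=0.5,      # target shape ratio in [0, 1]
        n_episodes=200,            # number of training episodes
        episode_duration=50.0,     # simulated duration per episode
        update_actions_at=0.1,     # time between action updates
        n_particles=100,
        seed=42,
        ϵ=0.01,                    # ϵ-greedy exploration rate
    )

    # Total reward per episode, as recorded by the TotalRewardPerEpisode hook:
    rl_params.hook.rewards
=#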