From cfb38c6a19da40539eb5ae030794468bb99330b3 Mon Sep 17 00:00:00 2001
From: MoBit
Date: Mon, 13 Dec 2021 02:24:34 +0100
Subject: [PATCH] state and action ind

---
 src/reinforcement_learning.jl | 102 +++++++++++++++++++++++++---------
 src/setup.jl                  |   4 +-
 src/simulation.jl             |  17 ++++--
 3 files changed, 89 insertions(+), 34 deletions(-)

diff --git a/src/reinforcement_learning.jl b/src/reinforcement_learning.jl
index b3f9436..79997fe 100644
--- a/src/reinforcement_learning.jl
+++ b/src/reinforcement_learning.jl
@@ -5,6 +5,7 @@ using Flux: InvDecay
 using Intervals
 using StaticArrays: SVector
 using Random: Random
+using ProgressMeter: @showprogress
 
 using ..ReCo
 
@@ -28,17 +29,19 @@ end
 
 mutable struct EnvParams
     action_space::Vector{Tuple{Float64,Float64}}
+    action_space_ind::Vector{Int64}
     distance_state_space::Vector{DistanceState}
     direction_state_space::Vector{DirectionState}
     state_space::Vector{Tuple{Union{DistanceState,Nothing},Union{DirectionState,Nothing}}}
+    state_space_ind::Vector{Int64}
     reward::Float64
 
     function EnvParams(
         min_distance::Float64,
         max_distance::Float64;
-        n_v_actions::Int64=2,
-        n_ω_actions::Int64=3,
-        max_v::Float64=20.0,
+        n_v_actions::Int64=5,
+        n_ω_actions::Int64=5,
+        max_v::Float64=80.0,
         max_ω::Float64=π / 1.5,
         n_distance_states::Int64=3,
         n_direction_states::Int64=4,
@@ -65,6 +68,8 @@ mutable struct EnvParams
             end
         end
 
+        action_space_ind = collect(1:n_actions)
+
         distance_range =
             min_distance:((max_distance - min_distance) / n_distance_states):max_distance
 
@@ -109,8 +114,18 @@ mutable struct EnvParams
         end
 
         state_space[ind] = (nothing, nothing)
+        state_space_ind = collect(1:n_states)
+
+        initial_reward = 0.0
+
         return new(
-            action_space, distance_state_space, direction_state_space, state_space, 0.0
+            action_space,
+            action_space_ind,
+            distance_state_space,
+            direction_state_space,
+            state_space,
+            state_space_ind,
+            initial_reward,
         )
     end
 end
@@ -118,14 +133,17 @@ end
 mutable struct Env <: AbstractEnv
     params::EnvParams
     particle::ReCo.Particle
-    state::Tuple{Union{DistanceState,Nothing},Union{DirectionState,Nothing}}
+    state_ind::Int64
 
     function Env(params::EnvParams, particle::ReCo.Particle)
-        return new(params, particle, (nothing, nothing))
+        # initial_state = (nothing, nothing)
+        initial_state_ind = length(params.state_space_ind)
+
+        return new(params, particle, initial_state_ind)
     end
 end
 
-function gen_policy(n_states, n_actions)
+function gen_policy(n_states::Int64, n_actions::Int64)
     return QBasedPolicy(;
         learner=MonteCarloLearner(;
             approximator=TabularQApproximator(;
@@ -145,9 +163,13 @@ struct Params{H<:AbstractHook}
     n_steps_before_actions_update::Int64
     min_distance²::Vector{Float64}
     r⃗₁₂_to_min_distance_particle::Vector{SVector{2,Float64}}
+    goal_shape_ratio::Float64
 
     function Params{H}(
-        n_particles::Int64, env_params::EnvParams, n_steps_before_actions_update::Int64
+        n_particles::Int64,
+        env_params::EnvParams,
+        n_steps_before_actions_update::Int64,
+        goal_shape_ratio::Float64,
     ) where {H<:AbstractHook}
         policies = [
            gen_policy(length(env_params.state_space), length(env_params.action_space)) for
@@ -165,27 +187,32 @@ struct Params{H<:AbstractHook}
             n_steps_before_actions_update,
             zeros(n_particles),
             fill(SVector(0.0, 0.0), n_particles),
+            goal_shape_ratio,
         )
     end
 end
 
-RLBase.state_space(env::Env) = env.state_space
+RLBase.state_space(env::Env) = env.params.state_space_ind
 
-RLBase.state(env::Env) = env.state
+RLBase.state(env::Env) = env.state_ind
 
-RLBase.action_space(env::Env) = env.params.action_space
+RLBase.action_space(env::Env) = env.params.action_space_ind
 
 RLBase.reward(env::Env) = env.params.reward
 
+RLBase.is_terminated(::Env) = false
+
 function pre_integration_hook!(rl_params::Params, n_particles::Int64)
     for i in 1:n_particles
         env = rl_params.envs[i]
         agent = rl_params.agents[i]
-        action = agent(env)
+
+        action_ind = agent(env)
+        action = rl_params.env_params.action_space[action_ind]
         rl_params.actions[i] = action
 
-        agent(PRE_ACT_STAGE, env, action)
-        rl_params.hooks[i](PRE_ACT_STAGE, agent, env, action)
+        agent(PRE_ACT_STAGE, env, action_ind)
+        rl_params.hooks[i](PRE_ACT_STAGE, agent, env, action_ind)
     end
 
     return nothing
@@ -209,17 +236,29 @@ function state_hook(
     return nothing
 end
 
-function integration_hook(particle::ReCo.Particle, rl_params::Params, δt::Float64)
+function integration_hook(
+    particle::ReCo.Particle, rl_params::Params, δt::Float64, si::Float64, co::Float64
+)
     action = rl_params.actions[particle.id]
 
-    particle.tmp_c += action[1] * δt
+    vδt = action[1] * δt
+    particle.tmp_c += SVector(vδt * co, vδt * si)
     particle.φ += action[2] * δt
 
     return nothing
 end
 
+function get_state_ind(
+    state::T, states::Vector{T}
+) where {T<:Tuple{Union{DistanceState,Nothing},Union{DirectionState,Nothing}}}
+    return findfirst(x -> x == state, states)
+end
+
 function post_integration_hook(
-    rl_params::Params, n_particles::Int64, particles::Vector{ReCo.Particle}
+    rl_params::Params,
+    n_particles::Int64,
+    particles::Vector{ReCo.Particle},
+    half_box_len::Float64,
 )
     env_direction_state = rl_params.env_params.direction_state_space[1]
 
@@ -239,7 +278,8 @@ function post_integration_hook(
         end
 
         if isnothing(env_distance_state)
-            env.state = (nothing, nothing)
+            # (nothing, nothing)
+            env.state_ind = length(env.params.state_space)
         else
             r⃗₁₂ = rl_params.r⃗₁₂_to_min_distance_particle[i]
             si, co = sincos(particles[i].φ)
@@ -259,9 +299,17 @@ function post_integration_hook(
                 end
             end
 
-            env.state = (env_distance_state, env_direction_state)
+            state = (env_distance_state, env_direction_state)
+            env.state_ind = get_state_ind(state, env.params.state_space)
         end
 
+        env.params.reward =
+            1 -
+            (
+                ReCo.gyration_tensor_eigvals_ratio(particles, half_box_len) -
+                rl_params.goal_shape_ratio
+            )^2
+
         agent(POST_ACT_STAGE, env)
         rl_params.hooks[i](POST_ACT_STAGE, agent, env)
     end
@@ -270,11 +318,13 @@ function post_integration_hook(
 end
 
 function run(;
+    goal_shape_ratio::Float64,
     n_episodes::Int64=100,
-    episode_duration::Float64=5.0,
+    episode_duration::Float64=100.0,
     update_actions_at::Float64=0.1,
-    n_particles::Int64=10,
+    n_particles::Int64=100,
 )
+    @assert 0.0 <= goal_shape_ratio <= 1.0
     @assert n_episodes > 0
     @assert episode_duration > 0
     @assert update_actions_at in 0.01:0.01:episode_duration
@@ -282,7 +332,7 @@ function run(;
 
     Random.seed!(42)
 
-    sim_consts = ReCo.gen_sim_consts(n_particles, 0.0; skin_to_interaction_r_ratio=3.5)
+    sim_consts = ReCo.gen_sim_consts(n_particles, 0.0; skin_to_interaction_r_ratio=4.0)
     n_particles = sim_consts.n_particles
 
     env_params = EnvParams(sim_consts.particle_radius, sim_consts.skin_r)
@@ -290,7 +340,7 @@ function run(;
     n_steps_before_actions_update = round(Int64, update_actions_at / sim_consts.δt)
 
     rl_params = Params{TotalRewardPerEpisode}(
-        n_particles, env_params, n_steps_before_actions_update
+        n_particles, env_params, n_steps_before_actions_update, goal_shape_ratio
     )
 
     for i in 1:n_particles
@@ -301,7 +351,7 @@ function run(;
         agent(PRE_EXPERIMENT_STAGE, env)
     end
 
-    for episode in 1:n_episodes
+    @showprogress 0.6 for episode in 1:n_episodes
         dir, particles = ReCo.init_sim_with_sim_consts(sim_consts; parent_dir="RL")
 
         for i in 1:n_particles
@@ -314,7 +364,7 @@ function run(;
 
         for i in 1:n_particles
             rl_params.envs[i].particle = particles[i]
-            rl_params.envs[i].state = (nothing, nothing)
+            rl_params.envs[i].state_ind = length(rl_params.env_params.state_space)
         end
 
         rl_params.env_params.reward = 0.0
@@ -339,7 +389,7 @@ function run(;
         rl_params.hooks[i](POST_EXPERIMENT_STAGE, agent, env)
     end
 
-    return rl_params.hooks
+    return rl_params
 end
 
 end # module
\ No newline at end of file
diff --git a/src/setup.jl b/src/setup.jl
index fe39406..05a7c41 100644
--- a/src/setup.jl
+++ b/src/setup.jl
@@ -70,16 +70,16 @@ function gen_sim_consts(
         max_approach_after_one_integration_step = buffer * (2 * v₀ * δt) / interaction_r
         @assert skin_to_interaction_r_ratio >= 1 + max_approach_after_one_integration_step
 
-        skin_r = skin_to_interaction_r_ratio * interaction_r
 
         n_steps_before_verlet_list_update = round(
             Int64,
            (skin_to_interaction_r_ratio - 1) / max_approach_after_one_integration_step,
         )
     else
-        skin_r = 1.5 * interaction_r
        n_steps_before_verlet_list_update = 100
     end
 
+    skin_r = skin_to_interaction_r_ratio * interaction_r
+
     grid_n = round(Int64, ceil(sqrt(n_particles)))
 
     n_particles = grid_n^2
diff --git a/src/simulation.jl b/src/simulation.jl
index 379dd00..68dc554 100644
--- a/src/simulation.jl
+++ b/src/simulation.jl
@@ -41,8 +41,11 @@ function update_verlet_lists!(args, cl)
 end
 
 function euler!(
-    args, state_hook::F, integration_hook::F, rl_params::Union{RL.Params,Nothing}
-) where {F<:Function}
+    args,
+    state_hook::Function,
+    integration_hook::Function,
+    rl_params::Union{RL.Params,Nothing},
+)
     for id1 in 1:(args.n_particles - 1)
         p1 = args.particles[id1]
         p1_c = p1.c
@@ -74,11 +77,11 @@ function euler!(
             args.v₀δt * si + args.c₃ * rand_normal01(),
         )
 
-        p.φ += args.c₄ * rand_normal01()
-
         restrict_coordinates!(p, args.half_box_len)
 
-        integration_hook(p, rl_params, args.δt)
+        integration_hook(p, rl_params, args.δt, si, co)
+
+        p.φ += args.c₄ * rand_normal01()
 
         p.c = p.tmp_c
     end
@@ -153,7 +156,9 @@ function simulate(
             euler!(args, state_hook, integration_hook, rl_params)
 
             if run_hooks
-                post_integration_hook(rl_params, args.n_particles, args.particles)
+                post_integration_hook(
+                    rl_params, args.n_particles, args.particles, args.half_box_len
+                )
                 state_hook = empty_hook
             end
         end
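
Usage sketch (not part of the patch): with this change, `run` requires the new `goal_shape_ratio` keyword and returns the full `rl_params` instead of only the hooks. The call below is a hypothetical example; the call path `ReCo.RL.run` is inferred from the `RL.Params` reference in simulation.jl, and the value 0.5 is an arbitrary choice, not taken from the patch.

    using ReCo

    # Start a training run with the new required keyword; the remaining keywords
    # repeat the defaults introduced by this patch.
    rl_params = ReCo.RL.run(;
        goal_shape_ratio=0.5,    # target gyration-tensor eigenvalue ratio in [0, 1]
        n_episodes=100,
        episode_duration=100.0,
        update_actions_at=0.1,
        n_particles=100,
    )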