1
0
Fork 0
mirror of https://gitlab.rlp.net/mobitar/ReCo.jl.git synced 2024-11-08 22:21:08 +00:00

Better title for only one episode in mean_kappa

This commit is contained in:
Mo8it 2022-02-01 03:09:27 +01:00
parent 7e52270d86
commit 1409206eab
3 changed files with 10 additions and 4 deletions

View file

@@ -65,12 +65,18 @@ function plot_mean_kappa(; rl_dir::String, n_last_episodes::Int64)
     init_cairomakie!()

     fig = gen_figure(; padding=9)

+    if n_last_episodes > 1
+        title = "Averaged over last $n_last_episodes episodes"
+    else
+        title = "Result of only one episode"
+    end
+
     ax = Axis(
         fig[1, 1];
         xlabel="Frame",
         ylabel=L"\kappa",
         limits=(1, total_n_snapshots, 0.0, 1.04),
-        title="Averaged over last $n_last_episodes episodes",
+        title=title,
     )

     lines!(ax, 1:total_n_snapshots, snapshot_κs; label=L"\kappa")

View file

@@ -37,8 +37,8 @@ function gen_agent(
     n_states::Int64, n_actions::Int64, ϵ_stable::Float64, reward_discount::Float64
 )
     # TODO: Optimize warming up and decay
-    warmup_steps = 400_000
-    decay_steps = 5_000_000
+    warmup_steps = 200_000
+    decay_steps = 4_000_000

     policy = QBasedPolicy(;
         learner=MonteCarloLearner(;

View file

@@ -15,7 +15,7 @@ function plot_rewards_from_env_helper(; env_helper::ReCo.RL.EnvHelper, rl_dir::String)
     init_cairomakie!()

-    fig = gen_figure()
+    fig = gen_figure(; padding=10)

     ax = Axis(
         fig[1, 1]; xlabel="Episode", ylabel="Reward", limits=((0, n_episodes), nothing)