1
0
Fork 0
mirror of https://gitlab.rlp.net/mobitar/ReCo.jl.git synced 2024-11-08 22:21:08 +00:00
ReCo.jl/visualization/RewardsPlot.jl

55 lines
1.3 KiB
Julia
Raw Normal View History

2022-04-05 01:25:01 +00:00
module RewardsPlot
export plot_rewards
using CairoMakie
using JLD2: JLD2
using ReCo: ReCo
include("common_CairoMakie.jl")
const DEFAULT_ENV_HELPER_FILENAME = "env_helper.jld2"
"""
    plot_rewards_from_env_helper(; env_helper::ReCo.RL.EnvHelper, rl_dir::String)

Plot the per-episode rewards stored in `env_helper` and save the figure as
`rl_dir/rewards.pdf`.

Return `nothing`.
"""
function plot_rewards_from_env_helper(; env_helper::ReCo.RL.EnvHelper, rl_dir::String)
    episode_rewards = env_helper.shared.hook.rewards
    episode_count = length(episode_rewards)

    init_cairomakie!()
    figure = gen_figure(; padding=10)

    axis = Axis(
        figure[1, 1];
        xlabel="Episode",
        ylabel="Reward",
        limits=((0, episode_count), nothing),
    )
    lines!(axis, 1:episode_count, episode_rewards)

    set_gaps!(figure)
    save_fig("rewards.pdf", figure; parent_dir=rl_dir)

    return nothing
end
"""
    plot_rewards(rl_dir::String, env_helper_filename::String="$DEFAULT_ENV_HELPER_FILENAME")

Plot the rewards of the reinforcement learning process at the directory `rl_dir`.

The output is `rl_dir/rewards.pdf`. `env_helper_filename` can be provided if the name
of the `jld2`-file of the environment helper differs from the default
`$DEFAULT_ENV_HELPER_FILENAME`.

Return `nothing`.
"""
function plot_rewards(
    rl_dir::String, env_helper_filename::String=DEFAULT_ENV_HELPER_FILENAME
)
    # joinpath is portable across platforms, unlike manual "/" interpolation.
    env_helper_path = joinpath(rl_dir, env_helper_filename)
    env_helper::ReCo.RL.EnvHelper = JLD2.load_object(env_helper_path)

    plot_rewards_from_env_helper(; env_helper, rl_dir)

    return nothing
end
end # module