
Add plot_rewards and latex_rl_table documentation

Mo8it 2022-02-08 23:04:22 +01:00
parent 792fe7b437
commit 5901cad09f
8 changed files with 53 additions and 12 deletions

View file

@@ -1,6 +1,6 @@
= ReCo.jl
:source-highlighter: highlight.js
:highlightjs-languages: bash, julia
:highlightjs-languages: bash, julia, latex
image:https://img.shields.io/badge/code%20style-blue-4495d1.svg[Code Style: Blue, link=https://github.com/invenia/BlueStyle]
@@ -8,7 +8,7 @@ image:https://img.shields.io/badge/code%20style-blue-4495d1.svg[Code Style: Blue
== Setup
The steps from the setup have to be followed before running anything in the following sections.
The following steps from the setup have to be followed every time before running anything in the following sections. An exception is installing the dependencies, which has to be done only the first time.
=== Launch Julia
@@ -32,15 +32,17 @@ using Pkg
Pkg.activate(".")
----
=== Install dependencies
=== Install/update dependencies
After activating the package environment, run the following to install the package dependencies:
After activating the package environment, run the following to install/update the package dependencies:
[source,julia]
----
Pkg.instantiate()
Pkg.update()
----
Running the lines above is required the first time you use the package, to install its dependencies. Running them again later only updates the installed dependencies, which is optional.
=== Import the package
You can import the package by running:
@@ -56,7 +58,7 @@ This will export the package's methods that are intended to be used by the end u
To access the documentation of the presented package methods further in this README, run `using ReCo` first. Then, enter the help mode by pressing `?` in the REPL. Now, type the method's name and press enter to see its documentation.
== Run simulation
== Run a simulation
Initialize a simulation with 100 particles having a self-propulsion velocity of 40.0 and return the relative path to the simulation directory:
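The call itself is cut off by this hunk; a minimal sketch, assuming an initializer named `init_sim` (the name is not shown in this diff) that takes the particle number and the self-propulsion velocity and returns the relative directory path:

[source,julia]
----
# Hypothetical sketch, not the elided original line:
sim_dir = init_sim(100, 40.0)
----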
@@ -98,7 +100,7 @@ plot_snapshot(sim_dir)
This will ask for the number of the snapshot to plot out of the total number of snapshots. The method's documentation includes all possible optional arguments and where the output can be found.
== Run reinforcement learning process
== Run a reinforcement learning process
Run a reinforcement learning process and return the environment helper and the path of the process directory relative to the directory `ReCo.jl`:
[source,julia]
@@ -125,10 +127,17 @@ To generate a LaTeX table with the names of the state and action combinations for the
[source,julia]
----
include("src/RL/latex_table.jl")
latex_rl_table(env_helper, FILENAME)
latex_rl_table(env_helper, FILENAME_WITHOUT_EXTENSION)
----
`FILENAME` has to be replaced by the wanted file name without extension of the `.tex` file. This file can then be found under `ReCo.jl/exports/FILENAME.tex`.
`FILENAME_WITHOUT_EXTENSION` has to be replaced by the desired file name of the `.tex` file, without the extension. The documentation of `latex_rl_table` explains where the output is placed.
The output file can be used in a LaTeX document:
[source,latex]
----
\input{FILENAME_WITHOUT_EXTENSION}
----
=== Rewards
@@ -146,6 +155,8 @@ To plot the rewards, run the following:
plot_rewards(rl_dir)
----
The method's documentation explains where the output is placed.
=== Mean kappa
To plot the mean of kappa, the ratio of the eigenvalues of the gyration tensor, run the following:
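The call itself lies beyond this hunk. As a sketch of the quantity described above (which eigenvalue goes in the numerator is an assumption, not stated here):

[source,latex]
----
% kappa as the ratio of the gyration tensor eigenvalues \lambda_1 \le \lambda_2
\kappa = \frac{\lambda_2}{\lambda_1}
----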

View file

@@ -7,7 +7,7 @@ using StaticArrays: SVector
"""
angle2(a::SVector{2,R}, b::SVector{2,R}) where {R<:Real}
Returns the angle φ from vector a to b while φ ∈ [-π, π].
Return the angle `φ` from vector `a` to `b` while `φ` ∈ [-π, π].
"""
function angle2(a::SVector{2,R}, b::SVector{2,R}) where {R<:Real}
θ_a = atan(a[2], a[1])
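# Usage sketch (illustration, not part of the original file):
#   julia> angle2(SVector(1.0, 0.0), SVector(0.0, 1.0))
#   1.5707963267948966  # ≈ π/2, the angle from e_x to e_y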

View file

@@ -81,6 +81,8 @@ end
Run a reinforcement learning process and return the tuple (`env_helper`, `rl_dir`). `env_helper` is the environment helper and `rl_dir` is the path of the process directory relative to the directory `ReCo.jl`.
# Arguments
- `EnvType::Type{<:Env}`: Environment type. It has to be one of the environments named after the file names in the directory `ReCo.jl/RL/Envs`, for example: `LocalCOMEnv`. A description of an environment is included at the beginning of the corresponding file.
- `process_dir::String=string(EnvType)`: Path to the reinforcement learning process directory relative to `ReCo.jl/exports/RL`.
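A hedged call sketch based only on the two arguments visible in this hunk (whether `process_dir` is passed as a keyword is an assumption; `LocalCOMEnv` is the example environment named above):

    env_helper, rl_dir = run_rl(LocalCOMEnv; process_dir="LocalCOMEnv")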

View file

@@ -11,6 +11,15 @@ function latex_table(
return nothing
end
"""
latex_rl_table(env_helper, filename_without_extension::String)
Generate a LaTeX table for the Q-matrix of `env_helper`.
The output is `ReCo.jl/exports/filename_without_extension.tex`. `env_helper` has to be an environment helper whose type has the abstract supertype `EnvHelper`.
Return `nothing`.
"""
function latex_rl_table(env_helper, filename_without_extension::String)
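# Work on a copy of the agent's tabular Q-matrix so that later
# formatting steps (outside this hunk) cannot mutate the live approximator.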
table = copy(env_helper.shared.agent.policy.learner.approximator.table)

View file

@@ -155,6 +155,8 @@ Animate a simulation.
The output is `sim_dir/animation.mkv`.
Return `nothing`.
# Arguments
- `sim_dir::String`: Simulation directory.
- `framerate::Int64=$DEFAULT_FRAMERATE`: Framerate of the animation.
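A usage sketch (the function name `animate` is assumed from the docstring, and further arguments may follow beyond this hunk):

    animate(sim_dir)  # writes sim_dir/animation.mkv at the default framerate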

View file

@@ -9,6 +9,8 @@ using ReCo: ReCo
include("common_CairoMakie.jl")
const DEFAULT_ENV_HELPER_FILENAME = "env_helper.jld2"
function plot_rewards_from_env_helper(; env_helper::ReCo.RL.EnvHelper, rl_dir::String)
rewards = env_helper.shared.hook.rewards
n_episodes = length(rewards)
@@ -30,8 +32,19 @@ function plot_rewards_from_env_helper(; env_helper::ReCo.RL.EnvHelper, rl_dir::S
return nothing
end
function plot_rewards(rl_dir::String, env_helper_file_name::String="env_helper.jld2")
env_helper::ReCo.RL.EnvHelper = JLD2.load_object("$rl_dir/$env_helper_file_name")
"""
plot_rewards(rl_dir::String, env_helper_filename::String="$DEFAULT_ENV_HELPER_FILENAME")
Plot the rewards of the reinforcement learning process at the directory `rl_dir`.
The output is `rl_dir/rewards.pdf`. `env_helper_filename` can be provided if the name of the `jld2`-file of the environment helper differs from the default `$DEFAULT_ENV_HELPER_FILENAME`.
Return `nothing`.
"""
function plot_rewards(
rl_dir::String, env_helper_filename::String=DEFAULT_ENV_HELPER_FILENAME
)
env_helper::ReCo.RL.EnvHelper = JLD2.load_object("$rl_dir/$env_helper_filename")
plot_rewards_from_env_helper(; env_helper, rl_dir)
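# Usage sketch (illustration): with the defaults above,
#   plot_rewards(rl_dir)                       # reads rl_dir/env_helper.jld2
#   plot_rewards(rl_dir, "custom_helper.jld2") # custom env helper file name
# Both write rl_dir/rewards.pdf as documented.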

View file

@@ -20,6 +20,8 @@ This method starts or resumes a simulation. For long simulations, the simulation
Some of the last snapshots might be lost if the simulation is stopped (see the argument `n_bundle_snapshots`).
Return `nothing`.
# Arguments
- `sim_dir::String`: Relative path of the initialized simulation directory.
- `duration::Float64`: Duration of the simulation.
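A hedged call sketch (the function name `run_sim` is an assumption, as it is not shown in this hunk; the argument list may continue beyond it):

    run_sim(sim_dir, 100.0)  # run the initialized simulation for a duration of 100.0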

View file

@@ -163,6 +163,8 @@ end
Initialize a simulation and return the relative path of the simulation directory.
# Arguments
- `n_particles::Int64`: Number of particles.
- `v₀::Float64`: Self-propulsion velocity. Only values in the interval [0.0, 80.0] are tested.
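Since only that interval is tested, a caller might guard `v₀` explicitly; a minimal sketch (illustration only):

    v₀ = 40.0
    0.0 <= v₀ <= 80.0 || @warn "v₀ outside the tested interval" v₀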