Mirror of https://gitlab.rlp.net/mobitar/ReCo.jl.git, synced 2024-12-27 01:11:04 +00:00
Fix typos
This commit is contained in:
parent 9c3d2fd562
commit a4950979fe
41 changed files with 45 additions and 45 deletions
@@ -1 +1 @@
-style = "blue"
+style = "blue"

@@ -8,7 +8,7 @@ image:https://img.shields.io/badge/code%20style-blue-4495d1.svg[Code Style: Blue

 == Setup

-The following steps from the setup have to be followed everytime before running anything in the following sections. An exception is installing the dependencies which has to be done only the first time.
+The following steps from the setup have to be followed every time before running anything in the following sections. An exception is installing the dependencies, which has to be done only the first time.

 === Launch Julia

@@ -24,7 +24,7 @@ julia --threads auto

 === Activating environment

-After launching Julia, the package environment has to be activated by running the follwing in the REPL:
+After launching Julia, the package environment has to be activated by running the following in the REPL:

 [source,julia]
 ----

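The activation command itself lies outside this hunk's context. For reference, activating a project environment from the Julia REPL is typically done through Pkg; the following is a minimal sketch, assuming the REPL was started inside the ReCo.jl directory, and not necessarily the exact lines in the README:

[source,julia]
----
# Minimal sketch, assuming the REPL runs in the ReCo.jl directory.
using Pkg
Pkg.activate(".")  # activate the package environment
Pkg.instantiate()  # install the dependencies; needed only the first time
----
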
@@ -121,7 +121,7 @@ The documentation of `run_rl` includes all possible optional arguments.
 env_helper.shared.agent.policy.learner.approximator.table
 ----

-To generate a LaTeX table with the states and actions combintation names for the Q-matrix, run the follwing:
+To generate a LaTeX table with the state and action combination names for the Q-matrix, run the following:

 [source,julia]
 ----

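The generating call falls outside this hunk's context. Judging from the `latex_rl_table` signature that appears in a later hunk of this diff, it presumably looks like the sketch below; the output filename is a hypothetical example:

[source,julia]
----
# Sketch based on the latex_rl_table signature visible later in this diff;
# the filename (given without extension) is a hypothetical example.
latex_rl_table(env_helper, "q_matrix_states_actions")
----
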
@@ -203,4 +203,4 @@ The output is `ReCo.jl/exports/graphics/reward_discount_analysis.pdf`.

 == Graphics

-The directory `ReCo.jl/graphics` has some Julia files that generate graphics related to this package. The function in every file that has to be run to generate the corresponding graphics starts with `plot_` or `gen_`. The output is placed in `ReCo.jl/exports/graphics`.
+The directory `ReCo.jl/graphics` has some Julia files that generate graphics related to this package. The function in every file that has to be run to generate the corresponding graphics starts with `plot_` or `gen_`. The output is placed in `ReCo.jl/exports/graphics`.

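As a hedged illustration of the pattern this paragraph describes, using `plot_potentials` from a later hunk of this diff (the file path is an assumption):

[source,julia]
----
# Hypothetical usage of the described pattern: include one of the files in
# ReCo.jl/graphics and call its `plot_`/`gen_` entry point. The file name is
# assumed; plot_potentials() itself appears in a later hunk of this diff.
include("graphics/potential.jl")
plot_potentials()  # saves potential.pdf under ReCo.jl/exports/graphics
----
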
@@ -110,4 +110,4 @@ function plot_mean_κ(; rl_dir::String, n_last_episodes::Int64)
     save_fig("mean_kappa.pdf", fig; parent_dir=rl_dir)

     return nothing
-end
+end

@@ -245,4 +245,4 @@ function run_random_walk()
     plot_random_walk(; T=100_000.0, v₀=0.0, seed=12345)

     return nothing
-end
+end

@@ -238,4 +238,4 @@ function run_radial_distribution_analysis()
     )

     return nothing
-end
+end

@@ -75,4 +75,4 @@ function run_reward_discount_analysis()
     )

     return nothing
-end
+end

@@ -127,4 +127,4 @@ function gen_COM_graphics()
     finish()

     return nothing
-end
+end

@@ -28,4 +28,4 @@ function gen_elliptical_distance_graphics()
     save_fig("elliptical_distance.pdf", fig)

     return nothing
-end
+end

@@ -57,4 +57,4 @@ function plot_potentials()
     save_fig("potential.pdf", fig)

     return nothing
-end
+end

@@ -84,4 +84,4 @@ function gen_rdf_graphics()
     finish()

     return nothing
-end
+end

@@ -26,4 +26,4 @@ function plot_reward_function()
     save_fig("reward_shaping.pdf", fig)

     return nothing
-end
+end

@@ -93,4 +93,4 @@ function gen_verlet_and_cell_lists_graphics()
     finish()

     return nothing
-end
+end

@@ -46,4 +46,4 @@ function run_benchmarks(
     return dir
 end

-end # module
+end # module

@@ -6,4 +6,4 @@ function method_not_implemented()
     return error("Method not implemented!")
 end

-end # module
+end # module

@@ -26,4 +26,4 @@ function norm2d(v::SVector{2,R}) where {R<:Real}
     return sqrt(sq_norm2d(v))
 end

-end # module
+end # module

@@ -32,4 +32,4 @@ function iterate(pv::PreVector, state::UInt64=UInt64(1))
     end
 end

-end # module
+end # module

@@ -118,4 +118,4 @@ function gen_action_spaces_labels(
     actions_labels::NTuple{N,String}, action_spaces::NTuple{N,AbstractRange}
 ) where {N}
     return [gen_action_space_labels(actions_labels[i], action_spaces[i]) for i in 1:N]
-end
+end

@@ -53,4 +53,4 @@ end

 function get_env_agent_hook(env_helper::EnvHelper)
     return (env_helper.shared.env, env_helper.shared.agent, env_helper.shared.hook)
-end
+end

@@ -158,4 +158,4 @@ function update_reward!(
     set_normalized_reward!(env, reward, env_helper)

     return nothing
-end
+end

@@ -145,4 +145,4 @@ function update_reward!(
     end

     return nothing
-end
+end

@@ -208,4 +208,4 @@ function update_reward!(
     end

     return nothing
-end
+end

@@ -189,4 +189,4 @@ function update_reward!(
     end

     return nothing
-end
+end

@@ -146,4 +146,4 @@ function update_reward!(
     set_normalized_reward!(env, reward, env_helper)

     return nothing
-end
+end

@@ -156,4 +156,4 @@ function update_reward!(
     set_normalized_reward!(env, reward, env_helper)

     return nothing
-end
+end

@@ -104,4 +104,4 @@ function update_reward!(
     set_normalized_reward!(env, reward, env_helper)

     return nothing
-end
+end

@@ -83,4 +83,4 @@ end

 function copy_states_to_old_states_hook!(::Nothing)
     return nothing
-end
+end

@@ -241,4 +241,4 @@ include("Envs/LocalCOMEnv.jl")
 include("Envs/OriginCompassEnv.jl")
 include("Envs/COMCompassEnv.jl")

-end # module
+end # module

@@ -9,4 +9,4 @@ function set_normalized_reward!(
     env.shared.reward = unnormalized_reward / normalization

     return nothing
-end
+end

@@ -52,4 +52,4 @@ function find_state_ind(value::Float64, state_space::Vector{Interval})::Int64
             return ind
         end
     end
-end
+end

@@ -54,4 +54,4 @@ function latex_rl_table(env_helper::ReCo.RL.EnvHelper, filename_without_extensio
     latex_table(df, "$filename_without_extension.tex")

     return nothing
-end
+end

@@ -54,4 +54,4 @@ using .SnapshotPlot
 include("Visualization/RewardsPlot.jl")
 using .RewardsPlot

-end # module
+end # module

@@ -172,4 +172,4 @@ function elliptical_distance(
     return sqrt(x^2 + (y / elliptical_b_a_ratio)^2)
 end

-end # module
+end # module

@@ -279,4 +279,4 @@ function animate(
     return nothing
 end

-end # module
+end # module

@@ -51,4 +51,4 @@ function plot_rewards(
     return nothing
 end

-end # module
+end # module

@@ -112,4 +112,4 @@ function plot_snapshot(
     return nothing
 end

-end # module
+end # module

@@ -26,4 +26,4 @@ function gen_axis_and_colorbar(
     Colorbar(fig[1, 2]; limits=(0, 2), colormap=color_scheme, label=L"\varphi / \pi")

     return (ax, color_scheme)
-end
+end

@@ -28,4 +28,4 @@ function save_fig(filename::String, fig::Figure; parent_dir="exports/graphics")
     save("$parent_dir/$filename", fig; pt_per_unit=1)

     return nothing
-end
+end

@@ -185,4 +185,4 @@ function get_bundle_to_snapshot(
         return (bundle, bundle_snapshot)
     end
 end
-end
+end

@@ -27,7 +27,7 @@ Return `nothing`.
 - `duration::Float64`: Duration of the simulation.
 - `snapshot_at::Float64=$DEFAULT_SNAPSHOT_AT`: Snapshot time interval.
 - `seed::Int64=$DEFAULT_SEED`: Random number generator seed.
-- `n_bundle_snapshots::Int64=$DEFAULT_N_BUNDLE_SNAPSHOTS`: Number of snapshots in a bundle. This number is relevant for long simulations that can be stopped while running. A simulation can be continued from the last bundle of snapshots. If the number of snapshots in a bundle is too high and the simulation is stopped, many of the last snapshots can be lost. A low number results in high IO since snapshots are then bundled and stored more often. For example, setting this number to 1 results in saving every snapshot immediately without bundeling it with other snapshots which would be more efficient. Setting the number to 1000 could mean loosing 999 snapshots in the worst case if the simulation is stopped before having 1000 snapshots to bundle and save.
+- `n_bundle_snapshots::Int64=$DEFAULT_N_BUNDLE_SNAPSHOTS`: Number of snapshots in a bundle. This number is relevant for long simulations that can be stopped while running. A simulation can be continued from the last bundle of snapshots. If the number of snapshots in a bundle is too high and the simulation is stopped, many of the last snapshots can be lost. A low number results in high IO since snapshots are then bundled and stored more often. For example, setting this number to 1 results in saving every snapshot immediately without bundling it with other snapshots, which would be more efficient. Setting the number to 1000 could mean losing 999 snapshots in the worst case if the simulation is stopped before having 1000 snapshots to bundle and save.
 - `env_helper::Union{RL.EnvHelper,Nothing}=nothing`: Environment helper. It should be left as the default `nothing` unless this function is used internally for reinforcement learning.
 - `show_progress::Bool=$DEFAULT_SHOW_PROGRESS`: Show simulation progress bar.
 """

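To make the bundling trade-off concrete, here is a hypothetical sketch of a `run_sim` call using the keyword arguments documented above; `sim_dir` and the positional-argument shape are assumptions, since `run_sim`'s full signature is not visible in this diff:

[source,julia]
----
# Hypothetical call assembled from the documented keyword arguments; the
# positional argument sim_dir (a simulation directory) is an assumption.
ReCo.run_sim(
    sim_dir;
    duration=200.0,
    n_bundle_snapshots=100,  # a crash loses at most 99 unsaved snapshots
    show_progress=true,
)
----
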
@@ -144,4 +144,4 @@ function run_sim(
     )

     return nothing
-end
+end

@@ -199,4 +199,4 @@ function init_sim(;
     return init_sim_with_sim_consts(
         sim_consts; exports_dir=exports_dir, parent_dir=parent_dir, comment=comment
     )
-end
+end

@@ -73,4 +73,4 @@ end
     @testset "gyration_tensor" begin
         @test ReCo.Shape.gyration_tensor_eigvals_ratio(particles, half_box_len) == 1.0
     end
-end
+end