# Root abstract type for every environment in this package. Concrete subtypes
# are expected to carry an `EnvSharedProps` in a `shared` field — all the
# `RLBase` accessors below read through `env.shared`.
abstract type Env <: AbstractEnv end

"""
    EnvSharedProps{n_state_dims}

Bookkeeping shared by all concrete `Env` subtypes: the discretized action
space (pairs of linear speed `v` and angular speed `ω`), the state-id lookup
tensor, and the per-step mutable fields (`state_id`, `reward`, `terminated`).

# Arguments
- `n_states`: total number of state ids; can differ from
  `prod(state_id_tensor_dims)` (presumably to allow extra synthetic states —
  TODO confirm with callers).
- `state_id_tensor_dims`: size of the tensor mapping a multi-dimensional
  state index to a linear state id.

# Keywords
- `n_v_actions=2`: number of discretized linear speeds (must be > 1).
- `n_ω_actions=3`: number of discretized angular speeds (must be > 1).
- `max_v=40.0`: maximum linear speed; speeds span `0.0:max_v`.
- `max_ω=π/2`: maximum angular speed; speeds span `-max_ω:max_ω`.

# Throws
- `ArgumentError` if any keyword violates the constraints above.
"""
mutable struct EnvSharedProps{n_state_dims}
    n_actions::Int64
    # All (v, ω) pairs, ω varying fastest within each v.
    action_space::Vector{SVector{2,Float64}}
    action_id_space::OneTo{Int64}
    n_states::Int64
    # Maps a multi-dimensional state index to a linear state id (1-based,
    # filled in column-major order).
    state_id_tensor::Array{Int64,n_state_dims}
    state_id_space::OneTo{Int64}
    state_id::Int64
    reward::Float64
    terminated::Bool

    function EnvSharedProps(
        n_states::Int64, # Can be different from the product of state_id_tensor_dims
        state_id_tensor_dims::NTuple{n_state_dims,Int64};
        n_v_actions::Int64=2,
        n_ω_actions::Int64=3,
        max_v::Float64=40.0,
        max_ω::Float64=π / 2,
    ) where {n_state_dims}
        # Validate with explicit errors rather than `@assert`, which may be
        # disabled at higher optimization levels.
        n_v_actions > 1 ||
            throw(ArgumentError("n_v_actions must be > 1, got $n_v_actions"))
        n_ω_actions > 1 ||
            throw(ArgumentError("n_ω_actions must be > 1, got $n_ω_actions"))
        max_v > 0 || throw(ArgumentError("max_v must be positive, got $max_v"))
        max_ω > 0 || throw(ArgumentError("max_ω must be positive, got $max_ω"))

        v_action_space = range(; start=0.0, stop=max_v, length=n_v_actions)
        ω_action_space = range(; start=-max_ω, stop=max_ω, length=n_ω_actions)
        n_actions = n_v_actions * n_ω_actions

        # Cartesian product of speeds: v is the outer loop, ω the inner, so ω
        # varies fastest — same ordering as enumerating action ids.
        action_space = [SVector(v, ω) for v in v_action_space for ω in ω_action_space]
        action_id_space = OneTo(n_actions)

        # Linear ids 1:prod(dims) laid out in column-major order, matching
        # `eachindex` traversal of the tensor.
        state_id_tensor = reshape(
            collect(1:prod(state_id_tensor_dims)), state_id_tensor_dims
        )
        state_id_space = OneTo(n_states)

        return new{n_state_dims}(
            n_actions,
            action_space,
            action_id_space,
            n_states,
            state_id_tensor,
            state_id_space,
            INITIAL_STATE_IND, # package-level constant, defined elsewhere
            INITIAL_REWARD,    # package-level constant, defined elsewhere
            false,             # not terminated at construction
        )
    end
end

"""
    reset!(env::Env)

Clear the termination flag on `env`.

NOTE(review): only `terminated` is reset here — `state_id` and `reward` are
left untouched; presumably concrete environments reset those themselves.
Returns `nothing`.
"""
function reset!(env::Env)
    env.shared.terminated = false
    return nothing
end

"Return the range of valid state ids (`OneTo(n_states)`)."
function RLBase.state_space(env::Env)
    return env.shared.state_id_space
end

"Return the current linear state id of `env`."
function RLBase.state(env::Env)
    return env.shared.state_id
end

"Return the range of valid action ids (`OneTo(n_actions)`)."
function RLBase.action_space(env::Env)
    return env.shared.action_id_space
end

"Return the reward stored from the most recent step."
function RLBase.reward(env::Env)
    return env.shared.reward
end

"Return `true` if `env` has reached a terminal state."
function RLBase.is_terminated(env::Env)
    return env.shared.terminated
end