Zahra Rajabi / pymdptoolbox

Commit 831ed8fe
authored Aug 18, 2013 by Steven Cordwell

initial movements to convert to package

parent 4e56cb61
Changes: 4
__init__.py (new file, 0 → 100644)

# -*- coding: utf-8 -*-
example.py (new file, 0 → 100644)

# -*- coding: utf-8 -*-
"""
Created on Sun Aug 18 14:32:25 2013

@author: steve
"""
from random import random

from numpy import diag, ones, where, zeros
from numpy.random import rand, randint
from scipy.sparse import coo_matrix, dok_matrix

# mdperr is the dictionary of error message strings defined in mdp.py; the
# functions below refer to it, so it must be imported for this module to run
from mdp import mdperr


def exampleForest(S=3, r1=4, r2=2, p=0.1, is_sparse=False):
"""Generate a MDP example based on a simple forest management scenario.
This function is used to generate a transition probability
(``A`` × ``S`` × ``S``) array ``P`` and a reward (``S`` × ``A``) matrix
``R`` that model the following problem. A forest is managed by two actions:
'Wait' and 'Cut'. An action is decided each year with first the objective
to maintain an old forest for wildlife and second to make money selling cut
wood. Each year there is a probability ``p`` that a fire burns the forest.
Here is how the problem is modelled.
Let {1, 2 . . . ``S`` } be the states of the forest, with ``S`` being the
oldest. Let 'Wait' be action 1 and 'Cut' action 2.
After a fire, the forest is in the youngest state, that is state 1.
The transition matrix P of the problem can then be defined as follows::
 p 1p 0.......0 
 . 0 1p 0....0 
P[1,:,:] =  . . 0 . 
 . . . 
 . . 1p 
 p 0 0....0 1p 
 1 0..........0 
 . . . 
P[2,:,:] =  . . . 
 . . . 
 . . . 
 1 0..........0 
The reward matrix R is defined as follows::
 0 
 . 
R[:,1] =  . 
 . 
 0 
 r1 
 0 
 1 
R[:,2] =  . 
 . 
 1 
 r2 
Parameters

S : int, optional
The number of states, which should be an integer greater than 0. By
default it is 3.
r1 : float, optional
The reward when the forest is in its oldest state and action 'Wait' is
performed. By default it is 4.
r2 : float, optional
The reward when the forest is in its oldest state and action 'Cut' is
performed. By default it is 2.
p : float, optional
The probability of wild fire occurence, in the range ]0, 1[. By default
it is 0.1.
Returns

out : tuple
``out[1]`` contains the transition probability matrix P with a shape of
(A, S, S). ``out[2]`` contains the reward matrix R with a shape of
(S, A).
Examples

>>> import mdp
>>> P, R = mdp.exampleForest()
>>> P
array([[[ 0.1, 0.9, 0. ],
[ 0.1, 0. , 0.9],
[ 0.1, 0. , 0.9]],
<BLANKLINE>
[[ 1. , 0. , 0. ],
[ 1. , 0. , 0. ],
[ 1. , 0. , 0. ]]])
>>> R
array([[ 0., 0.],
[ 0., 1.],
[ 4., 2.]])
"""
    if S <= 1:
        raise ValueError(mdperr["S_gt_1"])
    if (r1 <= 0) or (r2 <= 0):
        raise ValueError(mdperr["R_gt_0"])
    if (p < 0) or (p > 1):
        raise ValueError(mdperr["prob_in01"])
    # Definition of Transition matrix P(:,:,1) associated to action Wait
    # (action 1) and P(:,:,2) associated to action Cut (action 2)
    #             | p 1-p 0.......0  |                 | 1 0..........0 |
    #             | .  0  1-p 0....0 |                 | . .          . |
    # P(:,:,1) =  | .  .      0      | and P(:,:,2) =  | . .          . |
    #             | .  .        .    |                 | . .          . |
    #             | .  .         1-p |                 | . .          . |
    #             | p  0  0....0 1-p |                 | 1 0..........0 |
    if is_sparse:
        P = []
        rows = range(S) * 2
        cols = [0] * S + range(1, S) + [S - 1]
        vals = [p] * S + [1 - p] * S
        P.append(coo_matrix((vals, (rows, cols)), shape=(S, S)).tocsr())
        rows = range(S)
        cols = [0] * S
        vals = [1] * S
        P.append(coo_matrix((vals, (rows, cols)), shape=(S, S)).tocsr())
    else:
        P = zeros((2, S, S))
        P[0, :, :] = (1 - p) * diag(ones(S - 1), 1)
        P[0, :, 0] = p
        P[0, S - 1, S - 1] = (1 - p)
        P[1, :, :] = zeros((S, S))
        P[1, :, 0] = 1
    # Definition of Reward matrix R1 associated to action Wait and
    # R2 associated to action Cut
    #           | 0  |               | 0  |
    #           | .  |               | 1  |
    # R(:,1) =  | .  | and R(:,2) =  | .  |
    #           | .  |               | .  |
    #           | 0  |               | 1  |
    #           | r1 |               | r2 |
    R = zeros((S, 2))
    R[S - 1, 0] = r1
    R[:, 1] = ones(S)
    R[0, 1] = 0
    R[S - 1, 1] = r2
    # we want to return the generated transition and reward matrices
    return (P, R)
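
# A minimal usage sketch (the shapes follow from the construction above;
# with is_sparse=True the same data comes back as a list of scipy.sparse
# CSR matrices, one per action):
#
#   >>> P, R = exampleForest(S=4)
#   >>> P.shape, R.shape
#   ((2, 4, 4), (4, 2))
#   >>> Ps, Rs = exampleForest(is_sparse=True)
#   >>> len(Ps), Ps[0].shape
#   (2, (3, 3))
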
def exampleRand(S, A, is_sparse=False, mask=None):
    """Generate a random Markov Decision Process.

    Parameters
    ----------
    S : int
        number of states (> 0)
    A : int
        number of actions (> 0)
    is_sparse : bool, optional
        False to have matrices in dense format, True to have sparse matrices
        (default False).
    mask : array or None, optional
        matrix with 0 and 1 entries (0 indicates a place for a zero
        probability), with a shape of (S, S) or (A, S, S) (default: random)

    Returns
    -------
    out : tuple
        ``out[0]`` contains the transition probability matrix P with a shape
        of (A, S, S). ``out[1]`` contains the reward matrix R with a shape of
        (A, S, S).

    Examples
    --------
    >>> import mdp
    >>> P, R = mdp.exampleRand(5, 3)
    """
    # making sure the states and actions are more than one
    if (S < 1) or (A < 1):
        raise ValueError(mdperr["SA_gt_1"])
    # if the user has specified a mask, then it needs to be SxS or AxSxS
    if mask is not None:
        try:
            if mask.shape not in ((S, S), (A, S, S)):
                raise ValueError(mdperr["mask_SbyS"])
        except AttributeError:
            raise TypeError(mdperr["mask_numpy"])
    # generate the transition and reward matrices based on S, A and mask
    if is_sparse:
        # definition of transition matrix : square stochastic matrix
        P = [None] * A
        # definition of reward matrix (values between -1 and +1)
        R = [None] * A
        for a in xrange(A):
            # it may be more efficient to implement this by constructing
            # lists of rows, columns and values then creating a coo_matrix,
            # but this works for now
            PP = dok_matrix((S, S))
            RR = dok_matrix((S, S))
            for s in xrange(S):
                if mask is None:
                    m = rand(S)
                    m[m <= 2/3.0] = 0
                    m[m > 2/3.0] = 1
                elif mask.shape == (A, S, S):
                    m = mask[a][s]  # mask[a, s, :]
                else:
                    m = mask[s]  # mask[s, :]
                n = int(m.sum())
                # make sure that there is at least one transition in each
                # state
                if n == 0:
                    m[randint(0, S)] = 1
                    n = 1
                cols = where(m)[0]
                vals = rand(n)
                vals = vals / vals.sum()
                reward = 2 * rand(n) - ones(n)
                PP[s, cols] = vals
                RR[s, cols] = reward
            # PP.tocsr() takes the same amount of time as PP.tocoo().tocsr()
            # so constructing PP and RR as coo_matrix in the first place is
            # probably "better"
            P[a] = PP.tocsr()
            R[a] = RR.tocsr()
    else:
        # definition of transition matrix : square stochastic matrix
        P = zeros((A, S, S))
        # definition of reward matrix (values between -1 and +1)
        R = zeros((A, S, S))
        for a in range(A):
            for s in range(S):
                # create our own random mask if there is no user-supplied one
                if mask is None:
                    m = rand(S)
                    r = random()
                    m[m <= r] = 0
                    m[m > r] = 1
                elif mask.shape == (A, S, S):
                    m = mask[a][s]  # mask[a, s, :]
                else:
                    m = mask[s]  # mask[s, :]
                # make sure that there is at least one transition in each
                # state
                if m.sum() == 0:
                    m[randint(0, S)] = 1
                P[a][s] = m * rand(S)
                P[a][s] = P[a][s] / P[a][s].sum()
                R[a][s] = (m * (2 * rand(S) - ones(S, dtype=int)))
    # we want to return the generated transition and reward matrices
    return (P, R)
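
A minimal sketch, assuming example.py is importable alongside mdp.py, of exampleRand with a user-supplied mask; it checks that zero entries in the mask force zero probabilities and that each row of every action's transition matrix is renormalised to sum to one:

from numpy import eye
from example import exampleRand

# allow transitions only along the diagonal and back into state 0; any
# entry where the mask is 0 stays a zero probability
m = eye(5)
m[:, 0] = 1

P, R = exampleRand(5, 3, mask=m)
assert P.shape == (3, 5, 5) and R.shape == (3, 5, 5)
assert abs(P.sum(axis=2) - 1).max() < 1e-10   # rows are stochastic
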
mdp.py
...
@@ -98,13 +98,12 @@ from math import ceil, log, sqrt
from random import random
from time import time
from numpy import absolute, array, diag, empty, mean, mod, multiply
from numpy import ndarray, ones, where, zeros
from numpy.random import rand, randint
from numpy import absolute, array, empty, mean, mod, multiply
from numpy import ndarray, ones, zeros
from numpy.random import randint
from scipy.sparse import csr_matrix as sparse
from scipy.sparse import coo_matrix, dok_matrix
# __all__ = ["check", "checkSquareStochastic"]
from utils import check, getSpan

# These need to be fixed so that we use classes derived from Error.
mdperr = {
...
@@ -158,467 +157,6 @@ mdperr = {
    "PyMDPtoolbox: The maximum number of iterations must be greater than 0"
}
def check(P, R):
    """Check if P and R define a Markov Decision Process.

    Let S = number of states, A = number of actions.

    Parameters
    ----------
    P : array
        The transition matrices. It can be a three dimensional array with
        a shape of (A, S, S). It can also be a one dimensional array with
        a shape of (A, ), where each element contains a matrix of shape
        (S, S) which can possibly be sparse.
    R : array
        The reward matrix. It can be a three dimensional array with a
        shape of (A, S, S). It can also be a one dimensional array with a
        shape of (A, ), where each element contains a matrix with a shape
        of (S, S) which can possibly be sparse. It can also be an array
        with a shape of (S, A) which can possibly be sparse.

    Notes
    -----
    Raises an error if P and R do not define an MDP.
    """
    # Checking P
    try:
        if P.ndim == 3:
            aP, sP0, sP1 = P.shape
        elif P.ndim == 1:
            # A hack so that we can go into the next try-except statement
            # and continue checking from there
            raise AttributeError
        else:
            raise ValueError(mdperr["P_shape"])
    except AttributeError:
        try:
            aP = len(P)
            sP0, sP1 = P[0].shape
            for aa in xrange(1, aP):
                sP0aa, sP1aa = P[aa].shape
                if (sP0aa != sP0) or (sP1aa != sP1):
                    raise ValueError(mdperr["obj_square"])
        except AttributeError:
            raise TypeError(mdperr["P_shape"])
    except:
        raise
    # Checking R
    try:
        if R.ndim == 2:
            sR0, aR = R.shape
            sR1 = sR0
        elif R.ndim == 3:
            aR, sR0, sR1 = R.shape
        elif R.ndim == 1:
            # A hack so that we can go into the next try-except statement
            raise AttributeError
        else:
            raise ValueError(mdperr["R_shape"])
    except AttributeError:
        try:
            aR = len(R)
            sR0, sR1 = R[0].shape
            for aa in range(1, aR):
                sR0aa, sR1aa = R[aa].shape
                if (sR0aa != sR0) or (sR1aa != sR1):
                    raise ValueError(mdperr["obj_square"])
        except AttributeError:
            raise ValueError(mdperr["R_shape"])
    except:
        raise
    # Checking dimensions
    if (sP0 < 1) or (aP < 1) or (sP0 != sP1):
        raise ValueError(mdperr["P_shape"])
    if (sR0 < 1) or (aR < 1) or (sR0 != sR1):
        raise ValueError(mdperr["R_shape"])
    if (sP0 != sR0) or (aP != aR):
        raise ValueError(mdperr["PR_incompat"])
    # Check that the P's are square and stochastic
    for aa in xrange(aP):
        checkSquareStochastic(P[aa])
        #checkSquareStochastic(P[aa, :, :])
    # We are at the end of the checks, so if no exceptions have been raised
    # then that means there are (hopefully) no errors and we return None
    return None
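
# A sketch of check in use (assuming the example module from this commit is
# importable; names illustrative):
#
#   >>> from example import exampleForest
#   >>> P, R = exampleForest()
#   >>> check(P, R)          # a valid MDP: returns None silently
#
# whereas check(P, R[:-1]) raises a ValueError, because the reward matrix
# then reports fewer states than the transition matrices.
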
# These are the old code comments, which need to be converted to
# information in the docstring:
#
# transitions must be a numpy array, either an AxSxS ndarray (with any
# dtype other than "object"), or a 1xA ndarray with an "object" dtype,
# each element containing an SxS array. An AxSxS array will be
# converted to an object array. A numpy object array is similar to a
# MATLAB cell array.
#
# NumPy has an array type of 'object', which is roughly equivalent to
# the MATLAB cell array. These are most useful for storing sparse
# matrices, as these can only have two dimensions whereas we want to be
# able to store a transition matrix for each action. If the dtype of
# the transition probability array is object then we store this as
# P_is_object = True.
# If it is an object array, then it should only have one dimension,
# otherwise fail with a message explaining why.
# If it is a normal array then the number of dimensions must be exactly
# three, otherwise fail with a message explaining why.
#
# As above but for the reward array. A difference is that the reward
# array can have either two or three dimensions.
#
# We want to make sure that the transition probability array and the
# reward array are in agreement. This means that both should show that
# there are the same number of actions and the same number of states.
# Furthermore the transition probability matrices must be SxS in
# shape, so we check for that also.
#
# If the user has put their transition matrices into a numpy array
# with dtype of 'object', then it is possible that they have made a
# mistake and not all of the matrices are of the same shape. So,
# here we record the number of actions and states that the first
# matrix in element zero of the object array says it has. After
# that we check that every other matrix also reports the same
# number of actions and states, otherwise fail with an error.
# aP: the number of actions in the transition array. This
# corresponds to the number of elements in the object array.
#
# sP0: the number of states as reported by the number of rows of
# the transition matrix
# sP1: the number of states as reported by the number of columns of
# the transition matrix
#
# Now we check to see that every element of the object array holds
# a matrix of the same shape, otherwise fail.
#
# sP0aa and sP1aa represent the number of states in each
# subsequent element of the object array. If it doesn't match
# what was found in the first element, then we need to fail,
# telling the user what needs to be fixed.
#
# If we are using a normal array for this, then the first
# dimension should be the number of actions, and the second and
# third should be the number of states.
#
# The first dimension of the transition matrix must report the same
# number of states as the second dimension. If not, then we are not
# dealing with a square matrix and it is not a valid transition
# probability. Also, if the number of actions is less than one, or the
# number of states is less than one, then it also is not a valid
# transition probability.
#
# Now we check that each transition matrix is square-stochastic. For
# object arrays this is the matrix held in each element, but for
# normal arrays this is a matrix formed by taking a slice of the array.
#
# If the reward array has an object dtype, then we check that
# each element contains a matrix of the same shape, as we did
# above with the transition array.
#
# This indicates that the reward matrices are constructed per
# transition, so that the first dimension is the actions and
# the second two dimensions are the states.
#
# Otherwise the reward matrix is per state, so the first dimension is
# the states and the second dimension is the actions.
#
# sR1 is added just so that the next check doesn't error out
# saying that it doesn't exist.
#
# The number of actions must be more than zero, the number of states
# must also be more than zero, and the states must agree.
#
# Now we check to see that what the transition array is reporting and
# what the reward array is reporting agree as to the number of actions
# and states. If not, then fail explaining the situation.
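
# For example, the object-array convention described above looks like this
# (a sketch; the variable names are illustrative):
#
#   >>> from numpy import empty
#   >>> from scipy.sparse import eye
#   >>> P = empty(2, dtype=object)       # one slot per action, so A = 2
#   >>> P[0] = eye(3, 3).tocsr()         # each slot holds an SxS matrix
#   >>> P[1] = eye(3, 3).tocsr()
#   >>> P.ndim, P[0].shape
#   (1, (3, 3))
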
def checkSquareStochastic(Z):
    """Check if Z is a square stochastic matrix.

    Let S = number of states.

    Parameters
    ----------
    Z : matrix
        This should be a two dimensional array with a shape of (S, S). It
        can possibly be sparse.

    Notes
    -----
    Returns None if no error has been detected, else it raises an error.
    """
    # try to get the shape of the matrix
    try:
        s1, s2 = Z.shape
    except AttributeError:
        raise TypeError("Matrix should be a numpy type.")
    except ValueError:
        raise ValueError(mdperr["mat_square"])
    # check that the matrix is square, and that each row sums to one
    if s1 != s2:
        raise ValueError(mdperr["mat_square"])
    elif (absolute(Z.sum(axis=1) - ones(s2))).max() > 10e-12:
        raise ValueError(mdperr["mat_stoch"])
    # make sure that there are no values less than zero
    try:
        if (Z < 0).any():
            raise ValueError(mdperr["mat_nonneg"])
    except AttributeError:
        try:
            if (Z.data < 0).any():
                raise ValueError(mdperr["mat_nonneg"])
        except AttributeError:
            raise TypeError("Matrix should be a numpy type.")
    except:
        raise
    return None
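
# A small sketch of checkSquareStochastic in use (values illustrative):
#
#   >>> from numpy import array
#   >>> checkSquareStochastic(array([[0.5, 0.5], [0., 1.]]))   # returns None
#
# A non-square matrix, a row summing to something other than one (beyond
# the 10e-12 tolerance), or a negative entry raises a ValueError with the
# matching mdperr message.
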
def exampleForest(S=3, r1=4, r2=2, p=0.1, is_sparse=False):
    # ... identical to the version added in example.py above ...

def exampleRand(S, A, is_sparse=False, mask=None):
    # ... identical to the version added in example.py above ...