diff --git a/HISTORY b/HISTORY
index 8cacc65dafd8673e925645134168257f3daf1279..f3a385ac39b3af7c983eb7f42f4b63777beda01b 100644
--- a/HISTORY
+++ b/HISTORY
@@ -1,3 +1,11 @@
+2013-01-26
+v0.14 - the linear programming code has been finalised; however, there is currently no way to turn off the progress output of
+the LP solver from the cvxopt module. One unit test has been written. To use the linear programming method:
+>>> import mdp
+>>> P, R = mdp.exampleForest()
+>>> lp = mdp.LP(P, R, 0.9)
+>>> lp.iterate() # after some output from cvxopt.solvers.lp, the optimal policy is available as lp.policy
+
 2013-01-25
 v0.13 - the FiniteHorizon class has been fixed. No unit tests yet.
 >>> import mdp
diff --git a/NEWS b/NEWS
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..c13ad90abfd61afab7c88882e22afafc12740f8b 100644
--- a/NEWS
+++ b/NEWS
@@ -0,0 +1,5 @@
+2013-01-26
+The release of v0.14 will be made today. With this release, all of the functionality should be available for use. The initial
+release of v4.0 will be made once the documentation has been finalised and unit tests have been written to cover all of the
+classes. The first release will be numbered 4.0 to match the MATLAB/Octave/Scilab/R MDP Toolboxes that are available at
+http://www.inra.fr/mia/T/MDPtoolbox/
diff --git a/setup.py b/setup.py
index 1f18c5f08ae9d18fce8d01c33875f52ba3b287fe..2e7dcdb2210dacc6730a48959a0a824b3860e49d 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@
 from distutils.core import setup
 
 setup(name="PyMDPtoolbox",
-      version="0.13",
+      version="0.14",
       description="Python Markov Decision Problem Toolbox",
       author="Steven Cordwell",
       author_email="steven.cordwell@uqconnect.edu.au",
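
Note: a minimal runnable sketch of the LP usage documented in the HISTORY entry above. The mdp names (exampleForest, LP,
iterate, policy) are taken from that entry; the cvxopt.solvers.options['show_progress'] line is cvxopt's documented global
option for silencing its solvers, offered only as a possible workaround for the progress output noted in the changelog.

import cvxopt.solvers
import mdp

# Possible workaround for the changelog's complaint: cvxopt's documented global
# option suppresses the iteration log that cvxopt.solvers.lp prints by default.
cvxopt.solvers.options['show_progress'] = False

P, R = mdp.exampleForest()   # example transition and reward matrices
lp = mdp.LP(P, R, 0.9)       # linear programming solver with discount factor 0.9
lp.iterate()                 # solve the MDP via linear programming
print(lp.policy)             # the optimal policy, as described in the HISTORY entry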