Commit 19f4b715 authored by Steven Cordwell's avatar Steven Cordwell
Browse files

Update setup.py

parent 223b9137
......@@ -533,17 +533,7 @@ class PolicyIteration(MDP):
self.policy = randi(0, self.A, self.S)
self.discount = discount
if (discount < 1):
# compute a bound for the number of iterations
#self.max_iter = self.boundIter(epsilon)
self.max_iter = 5000
# computation of threshold of variation for V for an epsilon-optimal policy
self.thresh = epsilon * (1 - self.discount) / self.discount
else: # discount == 1
# bound for the number of iterations
self.max_iter = max_iter
# threshold of variation for V for an epsilon-optimal policy
self.thresh = epsilon
self.max_iter = max_iter
self.iter = 0
......
......@@ -7,7 +7,7 @@ setup(name="PyMDPtoolbox",
description="Python Markov Decision Problem Toolbox",
author="Steven Cordwell",
author_email="steven.cordwell@uqconnect.edu.au",
url="http://",
url="http://code.google.com/p/pymdptoolbox/",
license="Modified BSD License",
py_modules=["mdp"],
requires=["math", "numpy", "random", "time"],)
\ No newline at end of file
requires=["math", "numpy", "random", "scipy", "time"],)
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment