@brtkwr
Forked from awjuliani/rl-tutorial-2.ipynb
Last active April 23, 2018 08:40
Reinforcement Learning Tutorial 2 (Cart Pole example adapted to LunarLander-v2)
import numpy as np
import pickle
import tensorflow as tf
import matplotlib.pyplot as plt
import math
import gym
env = gym.make('LunarLander-v2')
print ('Shape of the observation space is', env.observation_space.shape)
# hyperparameters
H = 100 # number of hidden layer neurons
batch_size = 5 # every how many episodes to do a param update?
learning_rate = 1e-4 # feel free to play with this to train faster or more stably.
gamma = 0.99 # discount factor for reward
D, = env.observation_space.shape # input dimensionality
tf.reset_default_graph()
# This defines the policy network: it maps an observation of the environment to
# a probability distribution over the discrete actions available in the environment.
observations = tf.placeholder(tf.float32, [None,D] , name="input_x")
W1 = tf.get_variable("W1", shape=[D, H],
initializer=tf.contrib.layers.xavier_initializer())
layer1 = tf.nn.relu(tf.matmul(observations,W1))
W2 = tf.get_variable("W2", shape=[H, H],
initializer=tf.contrib.layers.xavier_initializer())
layer2 = tf.nn.relu(tf.matmul(layer1,W2))
W3 = tf.get_variable("W3", shape=[H, env.action_space.n],
initializer=tf.contrib.layers.xavier_initializer())
score = tf.matmul(layer2,W3)
probability = tf.nn.softmax(score)
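# For intuition, a minimal numpy sketch of what the softmax above computes
# (illustrative logit values; in the graph the scores come from the network):
example_scores = np.array([1.0, 2.0, 0.5, -1.0]) # one logit per discrete action
example_probs = np.exp(example_scores) / np.sum(np.exp(example_scores))
# example_probs is non-negative, sums to 1, and is read as P(action | observation)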
#From here we define the parts of the network needed for learning a good policy.
tvars = tf.trainable_variables()
input_y = tf.placeholder(tf.float32,[None,env.action_space.n], name="input_y")
advantages = tf.placeholder(tf.float32,name="reward_signal")
# The loss function. This sends the weights in the direction of making actions
# that gave good advantage (reward over time) more likely, and actions that didn't less likely.
# loglik = tf.log(input_y*(input_y - probability) + (1 - input_y)*(input_y + probability))
# loglik = input_y*(input_y - probability) + (1 - input_y)*(input_y + probability)
# loglik = input_y*probability + (1 - input_y)*(1 - probability)
# loss = -tf.reduce_sum(loglik * advantages)
# loglik = input_y*(input_y - probability) + (1 - input_y)*(input_y + probability)
loglik = tf.square(input_y - probability) # note: a squared error between the one-hot action and the softmax output, not a true log-likelihood
loss = tf.reduce_sum(loglik * advantages)
newGrads = tf.gradients(loss,tvars)
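# For comparison (a sketch, not the variant trained in this notebook): the
# conventional REINFORCE surrogate weights the log-probability of the chosen
# action by its advantage. Hypothetical names loglik_ce/loss_ce are used so the
# graph defined above is left untouched.
loglik_ce = tf.reduce_sum(input_y * tf.log(probability + 1e-10), axis=1, keep_dims=True)
loss_ce = -tf.reduce_mean(loglik_ce * advantages)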
# Once we have collected a series of gradients from multiple episodes, we apply them.
# We don't just apply gradients after every episode in order to account for noise in the reward signal.
adam = tf.train.AdamOptimizer(learning_rate=learning_rate) # Our optimizer
W1Grad = tf.placeholder(tf.float32,name="batch_grad1") # Placeholders to send the final gradients through when we update.
W2Grad = tf.placeholder(tf.float32,name="batch_grad2")
W3Grad = tf.placeholder(tf.float32,name="batch_grad3")
batchGrad = [W1Grad,W2Grad,W3Grad]
updateGrads = adam.apply_gradients(zip(batchGrad,tvars))
def discount_rewards(r):
    """ take 1D float array of rewards and compute discounted reward """
    discounted_r = np.zeros_like(r)
    running_add = 0
    for t in reversed(range(0, r.size)):
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r
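# A small worked example of the discounting above: with gamma = 0.99 the reward
# sequence [0, 0, 1] becomes [0.9801, 0.99, 1.0], i.e. each step is credited
# with a discounted share of every reward that follows it.
print (discount_rewards(np.array([0., 0., 1.])))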
# %%time
xs,hs,dlogps,drs,ys,tfps = [],[],[],[],[],[]
running_reward = None
running_loss = None
reward_sum = 0
episode_number = 1
total_episodes = 1000
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
    rendering = False
    sess.run(init)
    observation = env.reset() # Obtain an initial observation of the environment
    # Reset the gradient placeholder. We will collect gradients in
    # gradBuffer until we are ready to update our policy network.
    gradBuffer = sess.run(tvars)
    for ix,grad in enumerate(gradBuffer):
        print (grad.shape)
        gradBuffer[ix] = grad * 0
    while episode_number <= total_episodes:
        # Rendering the environment slows things down,
        # so let's only look at it once our agent is doing a good job.
        if reward_sum/batch_size > 0:
            env.render()
        # Make sure the observation is in a shape the network can handle.
        x = np.reshape(observation,[1,D])
        # Run the policy network and get an action to take.
        tfprob = sess.run(probability,feed_dict={observations: x})
        # print (tfprob)
        # action = 0 if np.random.uniform() < tfprob else 1
        action = np.argmax(tfprob)
        xs.append(x) # observation
        y = action # a "fake label"
        ys.append(y)
        # step the environment and get new measurements
        observation, reward, done, info = env.step(action)
        reward_sum += reward
        drs.append(reward) # record reward (has to be done after we call step() to get reward for previous action)
        if done:
            # print( drs)
            # print(tfprob)
            episode_number += 1
            # stack together all inputs, hidden states, action gradients, and rewards for this episode
            epx = np.vstack(xs)
            # epy = np.vstack(ys)
            epy = np.eye(env.action_space.n)[ys]
            epr = np.vstack(drs)
            tfp = tfps
            xs,hs,dlogps,drs,ys,tfps = [],[],[],[],[],[] # reset array memory
            # compute the discounted reward backwards through time
            discounted_epr = discount_rewards(epr)
            # size the rewards to be unit normal (helps control the gradient estimator variance)
            discounted_epr -= np.mean(discounted_epr)
            discounted_epr /= np.std(discounted_epr)
            # Get the gradient for this episode, and save it in the gradBuffer
            tProb,tLoglik,tLoss,tGrad = sess.run(fetches=(probability,loglik,loss,newGrads),feed_dict={observations: epx, input_y: epy, advantages: discounted_epr})
            if episode_number%500 == 0:
                for item in zip(discounted_epr,epy,tProb,tLoglik):
                    print (item)
            # Iterating over the layers
            for ix,grad in enumerate(tGrad):
                gradBuffer[ix] += grad
            # If we have completed enough episodes, then update the policy network with our gradients.
            if episode_number % batch_size == 0:
                sess.run(updateGrads,feed_dict={W1Grad: gradBuffer[0],W2Grad:gradBuffer[1],W3Grad:gradBuffer[2]})
                for ix,grad in enumerate(gradBuffer):
                    gradBuffer[ix] = grad * 0
                # Give a summary of how well our network is doing for each batch of episodes.
                running_reward = reward_sum if running_reward is None else running_reward * 0.95 + reward_sum * 0.05
                running_loss = tLoss if running_loss is None else running_loss * 0.95 + tLoss * 0.05
                print ('%d Episode reward %f. Running reward %f. Episode loss %f. Running loss %f.' % (episode_number,reward_sum/batch_size, running_reward/batch_size, tLoss, running_loss))
                if reward_sum/batch_size > 200:
                    print ("Task solved in",episode_number,'episodes!')
                    break
                reward_sum = 0
            observation = env.reset()
print (episode_number,'Episodes completed.')
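Note that the loop above always takes the greedy action with np.argmax, so the policy never explores. The original CartPole tutorial samples the action from the network's output instead (the commented-out two-action version is still visible above); a multi-action equivalent, sketched here as a possible drop-in replacement for the argmax line, would be

action = np.random.choice(env.action_space.n, p=tfprob[0])

which keeps the stochastic policy that a policy-gradient loss assumes, at the cost of noisier individual episodes.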
Raw
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Simple Reinforcement Learning in Tensorflow Part 2: Policy Gradient Method\n",
"This tutorial contains a simple example of how to build a policy-gradient based agent that can solve the CartPole problem. For more information, see this [Medium post](https://medium.com/@awjuliani/super-simple-reinforcement-learning-tutorial-part-2-ded33892c724#.mtwpvfi8b).\n",
"\n",
"For more Reinforcement Learning algorithms, including DQN and Model-based learning in Tensorflow, see my Github repo, [DeepRL-Agents](https://github.com/awjuliani/DeepRL-Agents). \n",
"\n",
"Parts of this tutorial are based on code by [Andrej Karpathy](https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5) and [korymath](https://gym.openai.com/evaluations/eval_a0aVJrGSyW892vBM04HQA)."
]
},
{
"cell_type": "code",
"execution_count": 56,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"import numpy as np\n",
"import pickle\n",
"import tensorflow as tf\n",
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"import math"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Loading the CartPole Environment\n",
"If you don't already have the OpenAI gym installed, use `pip install gym` to grab it."
]
},
{
"cell_type": "code",
"execution_count": 93,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"[2017-03-02 17:05:43,976] Making new env: LunarLander-v2\n"
]
}
],
"source": [
"import gym\n",
"env = gym.make('LunarLander-v2')"
]
},
{
"cell_type": "code",
"execution_count": 94,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"Box(8,)"
]
},
"execution_count": 94,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"env.observation_space"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"What happens if we try running the environment with random actions? How well do we do? (Hint: not so well.)"
]
},
{
"cell_type": "code",
"execution_count": 95,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Reward for this episode was: -289.554472433\n",
"Reward for this episode was: -124.40502918\n",
"Reward for this episode was: -89.0348428285\n",
"Reward for this episode was: -401.851968691\n",
"Reward for this episode was: -126.879062743\n"
]
}
],
"source": [
"env.reset()\n",
"random_episodes = 0\n",
"reward_sum = 0\n",
"while random_episodes < 5:\n",
" env.render()\n",
" observation, reward, done, _ = env.step(np.random.randint(0,env.action_space.n))\n",
" reward_sum += reward\n",
" if done:\n",
" random_episodes += 1\n",
" print (\"Reward for this episode was:\",reward_sum)\n",
" reward_sum = 0\n",
" env.reset()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The goal of the task is to achieve a reward of 200 per episode. For every step the agent keeps the pole in the air, the agent recieves a +1 reward. By randomly choosing actions, our reward for each episode is only a couple dozen. Let's make that better with RL!"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Setting up our Neural Network agent\n",
"This time we will be using a Policy neural network that takes observations, passes them through a single hidden layer, and then produces a probability of choosing a left/right movement. To learn more about this network, see [Andrej Karpathy's blog on Policy Gradient networks](http://karpathy.github.io/2016/05/31/rl/)."
]
},
{
"cell_type": "code",
"execution_count": 96,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Shape of the observation space is (8,)\n"
]
}
],
"source": [
"print ('Shape of the observation space is', env.observation_space.shape)"
]
},
{
"cell_type": "code",
"execution_count": 97,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# hyperparameters\n",
"H = 50 # number of hidden layer neurons\n",
"batch_size = 5 # every how many episodes to do a param update?\n",
"learning_rate = 1e-2 # feel free to play with this to train faster or more stably.\n",
"gamma = 0.99 # discount factor for reward\n",
"\n",
"D, = env.observation_space.shape # input dimensionality"
]
},
{
"cell_type": "code",
"execution_count": 101,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"tf.reset_default_graph()\n",
"\n",
"#This defines the network as it goes from taking an observation of the environment to \n",
"#giving a probability of chosing to the action of moving left or right.\n",
"observations = tf.placeholder(tf.float32, [None,D] , name=\"input_x\")\n",
"W1 = tf.get_variable(\"W1\", shape=[D, H],\n",
" initializer=tf.contrib.layers.xavier_initializer())\n",
"layer1 = tf.nn.relu(tf.matmul(observations,W1))\n",
"\n",
"W2 = tf.get_variable(\"W2\", shape=[H, H],\n",
" initializer=tf.contrib.layers.xavier_initializer())\n",
"layer2 = tf.nn.relu(tf.matmul(layer1,W2))\n",
"\n",
"\n",
"W3 = tf.get_variable(\"W3\", shape=[H, env.action_space.n],\n",
" initializer=tf.contrib.layers.xavier_initializer())\n",
"score = tf.matmul(layer2,W3)\n",
"\n",
"probability = tf.nn.log_softmax(score)\n",
"\n",
"#From here we define the parts of the network needed for learning a good policy.\n",
"tvars = tf.trainable_variables()\n",
"input_y = tf.placeholder(tf.float32,[None,env.action_space.n], name=\"input_y\")\n",
"advantages = tf.placeholder(tf.float32,name=\"reward_signal\")\n",
"\n",
"# The loss function. This sends the weights in the direction of making actions \n",
"# that gave good advantage (reward over time) more likely, and actions that didn't less likely.\n",
"# loglik = tf.log(input_y*(input_y - probability) + (1 - input_y)*(input_y + probability))\n",
"# loss = -tf.reduce_mean(loglik * advantages)\n",
"loglik = input_y*(input_y - probability) + (1 - input_y)*(input_y + probability)\n",
"loss = -tf.reduce_sum(loglik * advantages)\n",
"\n",
"newGrads = tf.gradients(loss,tvars)\n",
"\n",
"# Once we have collected a series of gradients from multiple episodes, we apply them.\n",
"# We don't just apply gradeients after every episode in order to account for noise in the reward signal.\n",
"adam = tf.train.AdamOptimizer(learning_rate=learning_rate) # Our optimizer\n",
"W1Grad = tf.placeholder(tf.float32,name=\"batch_grad1\") # Placeholders to send the final gradients through when we update.\n",
"W2Grad = tf.placeholder(tf.float32,name=\"batch_grad2\")\n",
"W3Grad = tf.placeholder(tf.float32,name=\"batch_grad3\")\n",
"batchGrad = [W1Grad,W2Grad,W3Grad]\n",
"updateGrads = adam.apply_gradients(zip(batchGrad,tvars))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Advantage function\n",
"This function allows us to weigh the rewards our agent recieves. In the context of the Cart-Pole task, we want actions that kept the pole in the air a long time to have a large reward, and actions that contributed to the pole falling to have a decreased or negative reward. We do this by weighing the rewards from the end of the episode, with actions at the end being seen as negative, since they likely contributed to the pole falling, and the episode ending. Likewise, early actions are seen as more positive, since they weren't responsible for the pole falling."
]
},
{
"cell_type": "code",
"execution_count": 102,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"def discount_rewards(r):\n",
" \"\"\" take 1D float array of rewards and compute discounted reward \"\"\"\n",
" discounted_r = np.zeros_like(r)\n",
" running_add = 0\n",
" for t in reversed(range(0, r.size)):\n",
" running_add = running_add * gamma + r[t]\n",
" discounted_r[t] = running_add\n",
" return discounted_r"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Running the Agent and Environment"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Here we run the neural network agent, and have it act in the CartPole environment."
]
},
{
"cell_type": "code",
"execution_count": 103,
"metadata": {
"collapsed": false,
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(8, 50)\n",
"(50, 50)\n",
"(50, 4)\n",
"19.2147\n",
"39.1676\n",
"82.4522\n",
"13.3252\n",
"Average reward for episode -579.168964. Total average reward -579.168964.\n",
"3.28467\n",
"-4.87285\n",
"-0.525373\n",
"11.9898\n",
"-1.97829\n",
"Average reward for episode -509.259002. Total average reward -578.469864.\n",
"-5.47647\n",
"-7.90704\n",
"-7.18119\n",
"-7.74173\n",
"-5.1121\n",
"Average reward for episode -548.911812. Total average reward -578.174283.\n",
"-21.1336\n",
"-31.7018\n",
"-14.3208\n",
"-20.7738\n",
"-13.6283\n",
"Average reward for episode -693.387940. Total average reward -579.326420.\n",
"-26.2287\n",
"-13.4112\n",
"-24.6738\n",
"-15.1901\n",
"-10.9559\n",
"Average reward for episode -585.268589. Total average reward -579.385842.\n",
"-35.4176\n",
"-40.8808\n",
"-32.7765\n",
"-25.2876\n",
"-30.2069\n",
"Average reward for episode -558.296488. Total average reward -579.174948.\n",
"-75.6232\n",
"-64.7216\n",
"-62.2063\n",
"-46.253\n",
"-33.1521\n",
"Average reward for episode -615.465103. Total average reward -579.537850.\n",
"-131.35\n",
"-95.5993\n",
"-116.743\n",
"-39.6759\n",
"-61.1253\n",
"Average reward for episode -730.589017. Total average reward -581.048361.\n",
"-99.5541\n",
"-74.4258\n",
"-59.3504\n",
"-72.7925\n",
"-66.3265\n",
"Average reward for episode -495.467435. Total average reward -580.192552.\n",
"-102.498\n",
"-204.257\n",
"-119.303\n",
"-85.9933\n",
"-198.126\n",
"Average reward for episode -646.904296. Total average reward -580.859670.\n",
"-120.013\n",
"-88.7281\n",
"-156.272\n",
"-110.302\n",
"-341.982\n",
"Average reward for episode -612.597562. Total average reward -581.177048.\n",
"-289.861\n",
"-191.097\n",
"-392.386\n",
"-182.54\n",
"-334.003\n",
"Average reward for episode -730.335526. Total average reward -582.668633.\n",
"-416.628\n",
"-276.224\n",
"-254.14\n",
"-181.417\n",
"-789.78\n",
"Average reward for episode -755.823409. Total average reward -584.400181.\n",
"-269.952\n",
"-510.346\n",
"-284.146\n",
"-311.78\n",
"-400.737\n",
"Average reward for episode -636.106309. Total average reward -584.917242.\n",
"-407.014\n",
"-722.488\n",
"-866.629\n",
"-533.625\n",
"-274.55\n",
"Average reward for episode -653.302290. Total average reward -585.601093.\n",
"-767.682\n",
"-760.744\n",
"-641.328\n",
"-550.502\n",
"-247.315\n",
"Average reward for episode -689.741572. Total average reward -586.642498.\n",
"-404.212\n",
"-643.383\n",
"-677.364\n",
"-1105.7\n",
"-421.344\n",
"Average reward for episode -592.367399. Total average reward -586.699747.\n",
"-402.44\n",
"-963.116\n",
"-690.3\n",
"-584.048\n",
"-572.666\n",
"Average reward for episode -545.538149. Total average reward -586.288131.\n",
"-1252.33\n",
"-654.277\n",
"-1290.36\n",
"-559.603\n",
"-1217.71\n",
"Average reward for episode -511.302721. Total average reward -585.538277.\n",
"-735.711\n",
"-828.524\n",
"-1026.95\n",
"-757.529\n",
"-757.173\n",
"Average reward for episode -497.830422. Total average reward -584.661198.\n",
"-1271.81\n",
"-1005.07\n",
"-663.242\n",
"-1092.23\n",
"-1311.21\n",
"Average reward for episode -561.009110. Total average reward -584.424677.\n",
"-1047.78\n",
"-1678.23\n",
"-1016.11\n",
"-1239.32\n",
"-1470.36\n",
"Average reward for episode -551.954932. Total average reward -584.099980.\n",
"-1351.27\n",
"-1684.68\n",
"-2670.17\n",
"-2498.49\n",
"-2019.08\n",
"Average reward for episode -660.505662. Total average reward -584.864036.\n",
"-1969.66\n",
"-2627.09\n",
"-3342.81\n",
"-1883.49\n",
"-2419.62\n",
"Average reward for episode -694.234165. Total average reward -585.957738.\n",
"-1738.66\n",
"-2817.71\n",
"-1774.26\n",
"-2272.61\n",
"-1768.03\n",
"Average reward for episode -561.090582. Total average reward -585.709066.\n",
"-4225.56\n",
"-1389.07\n",
"-2991.08\n",
"-3130.92\n",
"-2909.69\n",
"Average reward for episode -631.379018. Total average reward -586.165766.\n",
"-1523.93\n",
"-2946.25\n",
"-2754.69\n",
"-3600.9\n",
"-2672.05\n",
"Average reward for episode -536.946950. Total average reward -585.673578.\n",
"-2850.81\n",
"-2501.99\n",
"-2336.72\n",
"-3092.37\n",
"-2284.95\n",
"Average reward for episode -525.112451. Total average reward -585.067966.\n",
"-3535.37\n",
"-5634.23\n",
"-2140.05\n",
"-2601.54\n",
"-3692.38\n",
"Average reward for episode -597.710825. Total average reward -585.194395.\n",
"-3637.96\n",
"-2659.66\n",
"-5424.34\n",
"-5016.86\n",
"-4078.88\n",
"Average reward for episode -633.200409. Total average reward -585.674455.\n",
"-7354.7\n",
"-4567.64\n",
"-4607.57\n",
"-4418.94\n",
"-3679.2\n",
"Average reward for episode -590.443095. Total average reward -585.722141.\n",
"-3448.18\n",
"-3667.75\n",
"-3098.32\n",
"-4175.69\n",
"-6131.3\n",
"Average reward for episode -476.940664. Total average reward -584.634327.\n",
"-5342.85\n",
"-8966.21\n",
"-8192.25\n",
"-5963.65\n",
"-3737.29\n",
"Average reward for episode -628.662304. Total average reward -585.074606.\n",
"-8143.14\n",
"-4811.83\n",
"-5633.24\n",
"-6238.73\n",
"-3996.52\n",
"Average reward for episode -560.139461. Total average reward -584.825255.\n",
"-7653.13\n",
"-11558.4\n",
"-4915.99\n",
"-8830.91\n",
"-5892.06\n",
"Average reward for episode -652.155097. Total average reward -585.498553.\n",
"-8304.83\n",
"-11273.3\n",
"-6735.08\n",
"-7642.39\n",
"-8425.88\n",
"Average reward for episode -629.277561. Total average reward -585.936343.\n",
"-8838.94\n",
"-9284.89\n",
"-12558.9\n",
"-10206.2\n",
"-8379.26\n",
"Average reward for episode -634.295462. Total average reward -586.419935.\n",
"-8394.11\n",
"-10595.0\n",
"-6211.97\n",
"-8240.54\n",
"-13153.4\n",
"Average reward for episode -560.361941. Total average reward -586.159355.\n",
"-9827.53\n",
"-7751.1\n",
"-8047.93\n",
"-8357.5\n",
"-9770.28\n",
"Average reward for episode -490.100319. Total average reward -585.198764.\n",
"-16116.3\n",
"-10450.0\n",
"-10067.2\n",
"-19827.7\n",
"-14566.5\n",
"Average reward for episode -687.117158. Total average reward -586.217948.\n",
"-11793.7\n",
"-11535.1\n",
"-10513.6\n",
"-9397.95\n",
"-12972.2\n",
"Average reward for episode -536.339251. Total average reward -585.719161.\n",
"-18810.1\n",
"-16359.5\n",
"-18929.3\n",
"-18326.8\n",
"-23238.8\n",
"Average reward for episode -695.600291. Total average reward -586.817973.\n",
"-22740.2\n",
"-13472.7\n",
"-13466.1\n",
"-16234.8\n",
"-12128.2\n",
"Average reward for episode -609.255157. Total average reward -587.042344.\n",
"-18195.6\n",
"-16359.1\n",
"-16667.2\n",
"-14104.6\n",
"-11643.0\n",
"Average reward for episode -544.360868. Total average reward -586.615530.\n",
"-18865.1\n",
"-30570.0\n",
"-14944.3\n",
"-16603.5\n",
"-13877.2\n",
"Average reward for episode -574.946681. Total average reward -586.498841.\n",
"-23772.2\n",
"-17701.9\n",
"-26770.0\n",
"-29588.9\n",
"-24702.0\n",
"Average reward for episode -662.828407. Total average reward -587.262137.\n",
"-29694.0\n",
"-15465.3\n",
"-31799.5\n",
"-12616.5\n",
"-18533.1\n",
"Average reward for episode -575.006382. Total average reward -587.139579.\n",
"-26501.4\n",
"-20439.7\n",
"-18671.5\n",
"-24763.7\n",
"-31900.0\n",
"Average reward for episode -606.146769. Total average reward -587.329651.\n",
"-31673.5\n",
"-35891.4\n",
"-36981.8\n",
"-30090.5\n",
"-30231.7\n",
"Average reward for episode -715.002102. Total average reward -588.606376.\n",
"-27468.5\n",
"-19533.7\n",
"-38439.8\n",
"-45350.0\n",
"-22982.0\n",
"Average reward for episode -629.639646. Total average reward -589.016708.\n",
"-33707.2\n",
"-48386.8\n",
"-47698.9\n",
"-42111.0\n",
"-23724.0\n",
"Average reward for episode -714.697672. Total average reward -590.273518.\n",
"-28675.5\n",
"-26577.7\n",
"-51435.1\n",
"-40919.5\n",
"-32980.3\n",
"Average reward for episode -590.944765. Total average reward -590.280231.\n",
"-32058.2\n",
"-21669.3\n",
"-33697.7\n",
"-40532.9\n",
"-62649.8\n",
"Average reward for episode -591.846955. Total average reward -590.295898.\n",
"-46936.3\n",
"-27578.5\n",
"-48287.0\n",
"-37138.0\n",
"-64650.1\n",
"Average reward for episode -640.786167. Total average reward -590.800800.\n",
"-36454.4\n",
"-42458.3\n",
"-37841.2\n",
"-44481.8\n",
"-68128.9\n",
"Average reward for episode -657.597172. Total average reward -591.468764.\n",
"-64143.8\n",
"-61595.2\n",
"-57061.6\n",
"-34732.7\n",
"-59792.6\n",
"Average reward for episode -691.709245. Total average reward -592.471169.\n",
"-65495.1\n",
"-76001.3\n",
"-86469.0\n",
"-38124.0\n",
"-67280.4\n",
"Average reward for episode -666.372901. Total average reward -593.210186.\n",
"-56004.3\n",
"-52040.2\n",
"-63846.9\n",
"-51468.9\n",
"-68066.0\n",
"Average reward for episode -635.836028. Total average reward -593.636445.\n"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-103-8e4d4cc66c9e>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 33\u001b[0m \u001b[0;31m# Run the policy network and get an action to take.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 34\u001b[0;31m \u001b[0mtfprob\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mprobability\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mobservations\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mx\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 35\u001b[0m \u001b[0;31m# print (tfprob)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 36\u001b[0m \u001b[0;31m# action = 0 if np.random.uniform() < tfprob else 1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/home/bharat.kunwar/Anaconda3/envs/openai/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 764\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 765\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 766\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 767\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 768\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/home/bharat.kunwar/Anaconda3/envs/openai/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 962\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 963\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m--> 964\u001b[0;31m feed_dict_string, options, run_metadata)\n\u001b[0m\u001b[1;32m 965\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 966\u001b[0m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/home/bharat.kunwar/Anaconda3/envs/openai/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1012\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1013\u001b[0m return self._do_call(_run_fn, self._session, feed_dict, fetch_list,\n\u001b[0;32m-> 1014\u001b[0;31m target_list, options, run_metadata)\n\u001b[0m\u001b[1;32m 1015\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1016\u001b[0m return self._do_call(_prun_fn, self._session, handle, feed_dict,\n",
"\u001b[0;32m/home/bharat.kunwar/Anaconda3/envs/openai/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1019\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1020\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1021\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1022\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1023\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcompat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/home/bharat.kunwar/Anaconda3/envs/openai/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(session, feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1001\u001b[0m return tf_session.TF_Run(session, options,\n\u001b[1;32m 1002\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1003\u001b[0;31m status, run_metadata)\n\u001b[0m\u001b[1;32m 1004\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1005\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_prun_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msession\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"# %%time\n",
"\n",
"xs,hs,dlogps,drs,ys,tfps = [],[],[],[],[],[]\n",
"running_reward = None\n",
"reward_sum = 0\n",
"episode_number = 1\n",
"total_episodes = 10000\n",
"init = tf.global_variables_initializer()\n",
"\n",
"# Launch the graph\n",
"with tf.Session() as sess:\n",
" rendering = False\n",
" sess.run(init)\n",
" observation = env.reset() # Obtain an initial observation of the environment\n",
"\n",
" # Reset the gradient placeholder. We will collect gradients in \n",
" # gradBuffer until we are ready to update our policy network. \n",
" gradBuffer = sess.run(tvars)\n",
" for ix,grad in enumerate(gradBuffer):\n",
" print (grad.shape)\n",
" gradBuffer[ix] = grad * 0\n",
" \n",
" while episode_number <= total_episodes:\n",
" \n",
" # Rendering the environment slows things down, \n",
" # so let's only look at it once our agent is doing a good job.\n",
" if reward_sum/batch_size > 0: \n",
" env.render()\n",
" \n",
" # Make sure the observation is in a shape the network can handle.\n",
" x = np.reshape(observation,[1,D])\n",
" \n",
" # Run the policy network and get an action to take. \n",
" tfprob = sess.run(probability,feed_dict={observations: x})\n",
"# print (tfprob)\n",
"# action = 0 if np.random.uniform() < tfprob else 1\n",
" action = np.argmax(tfprob)\n",
" \n",
" xs.append(x) # observation\n",
" y = action # a \"fake label\"\n",
" ys.append(y)\n",
" \n",
" # step the environment and get new measurements\n",
" observation, reward, done, info = env.step(action)\n",
" reward_sum += reward\n",
"\n",
" drs.append(reward) # record reward (has to be done after we call step() to get reward for previous action)\n",
"\n",
" if done: \n",
" # print(tfprob)\n",
" episode_number += 1\n",
" # stack together all inputs, hidden states, action gradients, and rewards for this episode\n",
" epx = np.vstack(xs)\n",
" # epy = np.vstack(ys)\n",
" epy = np.eye(env.action_space.n)[ys]\n",
" epr = np.vstack(drs)\n",
" tfp = tfps\n",
" xs,hs,dlogps,drs,ys,tfps = [],[],[],[],[],[] # reset array memory\n",
" \n",
" # compute the discounted reward backwards through time\n",
" \n",
" discounted_epr = discount_rewards(epr)\n",
" \n",
" # size the rewards to be unit normal (helps control the gradient estimator variance)\n",
" discounted_epr -= np.mean(discounted_epr)\n",
" discounted_epr /= np.std(discounted_epr)\n",
" \n",
" # Get the gradient for this episode, and save it in the gradBuffer\n",
" tGrad,tLoss = sess.run(fetches=(newGrads,loss),feed_dict={observations: epx, input_y: epy, advantages: discounted_epr})\n",
" print(tLoss)\n",
" # Iterating over the layers\n",
" for ix,grad in enumerate(tGrad):\n",
" gradBuffer[ix] += grad\n",
" \n",
" # If we have completed enough episodes, then update the policy network with our gradients.\n",
" if episode_number % batch_size == 0: \n",
" sess.run(updateGrads,feed_dict={W1Grad: gradBuffer[0],W2Grad:gradBuffer[1],W3Grad:gradBuffer[2]})\n",
" for ix,grad in enumerate(gradBuffer):\n",
" gradBuffer[ix] = grad * 0\n",
" \n",
" # Give a summary of how well our network is doing for each batch of episodes.\n",
" running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01\n",
" print ('Average reward for episode %f. Total average reward %f.' % (reward_sum/batch_size, running_reward/batch_size))\n",
" \n",
" if reward_sum/batch_size > 200: \n",
" print (\"Task solved in\",episode_number,'episodes!')\n",
" break\n",
" \n",
" reward_sum = 0\n",
" \n",
" observation = env.reset()\n",
" \n",
"print (episode_number,'Episodes completed.')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As you can see, the network not only does much better than random actions, but achieves the goal of 200 points per episode, thus solving the task!"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
"display_name": "Python [conda env:openai]",
"language": "python",
"name": "conda-env-openai-py"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.2"
}
},
"nbformat": 4,
"nbformat_minor": 0
}