@kjanjua26
Created August 23, 2017 20:54
import common
import tensorflow as tf
import numpy as np

# Globals shared across the graph-building functions below.
sequence_length = tf.placeholder(tf.int32, [None])
conv_concat = []
max_pool_size = 4
# Utility functions
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.4)
    return tf.Variable(initial)
def bias_variable(shape):
    initial = tf.constant(0.2, shape=shape)
    return tf.Variable(initial)
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def maxpool(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
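# A minimal sanity check for the helpers above (not in the original gist; it
# assumes TF 1.x static shape inference). One 3x3 SAME convolution plus one
# 2x2 max-pool maps a [batch, 48, 918, 1] image to [batch, 24, 459, 16],
# which is where the 3672 x 48 reshape in convolutional_layers() comes from:
# 24 * 459 * 16 = 176256 = 3672 * 48.
def _check_conv_shapes():
    probe = tf.placeholder(tf.float32, [common.BATCH_SIZE, 48, 918, 1])
    out = maxpool(conv2d(probe, weight_variable([3, 3, 1, 16])))
    print(out.get_shape())  # (BATCH_SIZE, 24, 459, 16)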
def convolutional_layers():
    # Input batch of grayscale images and (unused here) one-hot labels.
    x = tf.placeholder(tf.float32, [common.BATCH_SIZE, 48, 918, 1])
    y = tf.placeholder(tf.float32, [common.BATCH_SIZE, common.num_classes])
    # One 3x3 convolution with 16 filters, ReLU, then 2x2 max-pooling.
    W = weight_variable([3, 3, 1, 16])
    b = bias_variable([16])
    conv1 = tf.nn.relu(conv2d(x, W) + b)
    pool1 = maxpool(conv1)  # [BATCH_SIZE, 24, 459, 16]
    # Flatten the feature maps and regroup them as a [batch, time, features]
    # sequence for the recurrent layers (24 * 459 * 16 == 3672 * 48).
    pool1 = tf.contrib.layers.flatten(pool1)
    pool1 = tf.reshape(pool1, [common.BATCH_SIZE, 3672, 48])
    print(pool1.get_shape())
    return pool1
'''
Alternative pooling block kept from an earlier experiment (it references
variables such as h, reduced and pooled_concat that are not defined here):

pooled = tf.nn.max_pool(h, ksize=[1, max_pool_size, 1, 1], strides=[1, max_pool_size, 1, 1], padding='SAME')
conv_concat = tf.concat(conv1, 2)
pooled = tf.reshape(pooled, [-1, reduced, 128])
pooled_concat.append(pooled)
pooled_concat = tf.concat(pooled_concat, 2)
pooled_concat = tf.nn.dropout(pooled_concat, 0.5)
return pooled_concat
'''
def get_train_model():
    x = convolutional_layers()
    print("Convolutional output shape:", x.get_shape())
    # sparse_placeholder generates the SparseTensor of labels required by the
    # ctc_loss op.
    targets = tf.sparse_placeholder(tf.int32)
    # 1-D array of size [batch_size] holding the sequence length of each sample.
    seq_len = tf.placeholder(tf.int32, [None])
    # Forward and backward cells for the bidirectional layer.
    forwardH1 = tf.nn.rnn_cell.LSTMCell(common.num_hidden, use_peepholes=True, state_is_tuple=True)
    backwardH1 = tf.nn.rnn_cell.LSTMCell(common.num_hidden, use_peepholes=True, state_is_tuple=True)
    # The second return value is the final state and is ignored here.
    outputs, _ = tf.nn.bidirectional_dynamic_rnn(forwardH1, backwardH1, x, seq_len, dtype=tf.float32)
    # Concatenate the forward and backward outputs along the feature axis.
    outputs = tf.concat(outputs, 2)
    shape = tf.shape(x)
    batch_s, max_timesteps = shape[0], shape[1]
    # Collapse the batch and time dimensions so the same projection weights
    # are applied at every timestep.
    outputs = tf.reshape(outputs, [-1, 2 * common.num_hidden])
    # Truncated-normal initialization with mean 0 and stddev 0.5.
    W = tf.Variable(tf.truncated_normal([2 * common.num_hidden, common.num_classes], stddev=0.5), name="W")
    # Zero-initialized bias, wrapped in a Variable so it is trainable.
    b = tf.Variable(tf.zeros(shape=[common.num_classes]), name='b')
    # Affine projection onto the character classes.
    logits = tf.matmul(outputs, W) + b
    # Reshape back to [batch, time, num_classes] ...
    logits = tf.reshape(logits, [batch_s, -1, common.num_classes])
    # ... and transpose to the time-major layout expected by ctc_loss.
    logits = tf.transpose(logits, (1, 0, 2))
    return logits, x, targets, seq_len, W, b
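# Sketch: wiring the model into CTC training. This is not part of the
# original gist; it shows how the tensors returned by get_train_model()
# would typically feed tf.nn.ctc_loss. The Adam optimizer and learning rate
# are assumptions.
def get_train_ops(learning_rate=1e-3):
    logits, x, targets, seq_len, W, b = get_train_model()
    # ctc_loss expects time-major logits, which get_train_model() returns.
    loss = tf.nn.ctc_loss(targets, logits, seq_len)
    cost = tf.reduce_mean(loss)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
    # Beam-search decoding plus label error rate for evaluation.
    decoded, _ = tf.nn.ctc_beam_search_decoder(logits, seq_len)
    ler = tf.reduce_mean(tf.edit_distance(tf.cast(decoded[0], tf.int32), targets))
    return cost, optimizer, ler

# Because targets is a tf.sparse_placeholder, label sequences must be fed as
# an (indices, values, shape) triple. A standard conversion helper (again an
# assumption, not from the original gist):
def sparse_tuple_from(sequences, dtype=np.int32):
    indices, values = [], []
    for n, seq in enumerate(sequences):
        indices.extend(zip([n] * len(seq), range(len(seq))))
        values.extend(seq)
    indices = np.asarray(indices, dtype=np.int64)
    values = np.asarray(values, dtype=dtype)
    shape = np.asarray([len(sequences), indices.max(0)[1] + 1], dtype=np.int64)
    return indices, values, shape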