LSTM model building
### Create the Stacked LSTM model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import LSTM
import tensorflow as tf

# Set the random seed for reproducibility: the model gives the same result each time it is run
tf.random.set_seed(1234)
# Build the stacked LSTM model
model = Sequential()
# First LSTM layer: expects windows of 100 timesteps with 1 feature per step
model.add(LSTM(150, return_sequences=True, input_shape=(100, 1)))
model.add(Dropout(0.2))  # dropout regularisation
# Second LSTM layer
model.add(LSTM(150, return_sequences=True))
# Third LSTM layer
model.add(LSTM(150, return_sequences=True))
model.add(Dropout(0.2))
# Fourth LSTM layer: the last one, so return_sequences is off and it emits a single vector
model.add(LSTM(150))  # you can try 100 units as well
model.add(Dropout(0.2))
# Output layer: one unit for the single-value regression target
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
# Summary of the model
model.summary()
## Output:
# Model: "sequential_8"
# _________________________________________________________________
#  Layer (type)                 Output Shape              Param #
# =================================================================
#  lstm_32 (LSTM)               (None, 100, 150)          91200
#  dropout_26 (Dropout)         (None, 100, 150)          0
#  lstm_33 (LSTM)               (None, 100, 150)          180600
#  lstm_34 (LSTM)               (None, 100, 150)          180600
#  dropout_27 (Dropout)         (None, 100, 150)          0
#  lstm_35 (LSTM)               (None, 150)                180600
#  dropout_28 (Dropout)         (None, 150)                0
#  dense_8 (Dense)              (None, 1)                  151
# =================================================================
# Total params: 633,151
# Trainable params: 633,151
# Non-trainable params: 0
# _________________________________________________________________
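
# The fit call below assumes X_train, y_train, X_test, and ytest are already
# defined. A minimal sketch of how they might be built, assuming a scaled
# univariate series `scaled_series` (a hypothetical 1-D NumPy array) cut into
# sliding windows of 100 timesteps to match input_shape=(100, 1):
import numpy as np

def create_dataset(series, time_steps=100):
    """Turn a 1-D series into (samples, time_steps, 1) windows and next-step targets."""
    X, y = [], []
    for i in range(len(series) - time_steps):
        X.append(series[i:i + time_steps])
        y.append(series[i + time_steps])
    X = np.array(X).reshape(-1, time_steps, 1)
    return X, np.array(y)

# Hypothetical usage with an 80/20 train/test split:
# split = int(len(scaled_series) * 0.8)
# X_train, y_train = create_dataset(scaled_series[:split])
# X_test, ytest = create_dataset(scaled_series[split - 100:])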
# Use early stopping to avoid overfitting
from tensorflow.keras.callbacks import EarlyStopping
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=30,
                        verbose=1, mode='auto', restore_best_weights=True)
history = model.fit(X_train, y_train, validation_data=(X_test, ytest),
                    callbacks=[monitor], verbose=1, epochs=1000)
# Alternative: train for a fixed number of epochs without early stopping
# history = model.fit(X_train, y_train, validation_data=(X_test, ytest), epochs=54, batch_size=64, verbose=1)
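
# After training, a quick sanity check is to plot the loss curves recorded in
# `history` and run the model on the test windows. This is a minimal sketch;
# if the series was scaled, inverse-transform the predictions with your scaler
# before comparing them to the raw values.
import matplotlib.pyplot as plt

plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='validation loss')
plt.xlabel('epoch')
plt.ylabel('MSE')
plt.legend()
plt.show()

test_pred = model.predict(X_test)  # shape: (n_test_samples, 1)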