Commit f440ef64 authored by Saurabh Deshpande's avatar Saurabh Deshpande

Changes in CNN.py

parent c3cae2dd
...@@ -18,27 +18,10 @@ from keras.callbacks import ModelCheckpoint, LearningRateScheduler ...@@ -18,27 +18,10 @@ from keras.callbacks import ModelCheckpoint, LearningRateScheduler
np.random.seed(123)  # for reproducibility
#def _get_available_gpus():
# """Get a list of available gpu devices (formatted as strings).
#
# # Returns
# A list of available GPU devices.
# """
# #global _LOCAL_DEVICES
# if tfback._LOCAL_DEVICES is None:
# devices = tf.config.list_logical_devices()
# tfback._LOCAL_DEVICES = [x.name for x in devices]
# return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]
#
#
#tfback._get_available_gpus = _get_available_gpus
#tfback._get_available_gpus()
#
##tf.config.experimental_list_devices()
#tf.config.list_logical_devices()
################ Let us import the data to train our ANN ###########################
#This is not uploaded on gitlab because of heavy size of training data
df = pd.read_csv('/home/users/sdeshpande/Deep_CNN/CSV_files/topface_converged_nov.csv', names=['f', 'u'],
                 header=None)
...@@ -117,13 +100,9 @@ conv9 = Conv3D(64, (3,3, 3), activation='relu', padding='same',data_format='chan ...@@ -117,13 +100,9 @@ conv9 = Conv3D(64, (3,3, 3), activation='relu', padding='same',data_format='chan
conv9 = Conv3D(64, (3,3, 3), activation='relu', padding='same',data_format='channels_first')(conv9)
conv9 = Conv3D(3, (1,1, 1), activation= None, padding= 'same',data_format='channels_first')(conv9)
conv9 = Lambda(lambda x: x[:,:,2:30,2:14,2:14])(conv9)
#conv9 = Conv3D(3, (5, 5, 5), activation= None, padding='valid',data_format='channels_first')(conv9)
UNET = Model(inputs=inputs, outputs=conv9)
#Load the best weights obtained previously
#UNET.load_weights("saved_models/topfacelrs_25_09_2.h5")
#Let's use a decaying learning rate as per the below law
def lr_scheduler(epoch, lr):
    k = 0.001
...@@ -170,39 +149,6 @@ plt.ylabel('Loss') ...@@ -170,39 +149,6 @@ plt.ylabel('Loss')
plt.legend()
plt.savefig('/home/users/sdeshpande/Deep_CNN/To_clusteroct/plots/topfacee_15_12.png')
################# Save the Mean Absolute Error and export text file ############
# Per-sample mean absolute errors plus a flat (input, prediction, target) table.
# Each test sample flattens to 3*28*12*12 = 12096 values.
sample_errors = np.zeros((n_test, 1))
data_predicted = np.zeros((12096 * n_test, 3))
for idx in range(n_test):
    # One forward pass per test sample; flatten everything to 12096 values.
    prediction = UNET.predict(X_test[idx].reshape(1, 3, 28, 12, 12)).reshape(12096)
    target = Y_test[idx].reshape(12096)
    sample_errors[idx] = np.mean(abs(prediction - target))
    rows = slice(12096 * idx, 12096 * (idx + 1))
    data_predicted[rows, 0] = X_test[idx].reshape(12096)
    data_predicted[rows, 1] = prediction
    data_predicted[rows, 2] = target
MAE = np.mean(sample_errors)          # came out as 0.00219 for
std = np.std(sample_errors, ddof=1)   # ddof=1 -> unbiased estimate
# Let's get single prediction time
tic_test = time.time()
single_out = UNET.predict(X_test[0].reshape(1, 3, 28, 12, 12))
toc_test = time.time()
# [MAE, std, training wall time, single-prediction wall time]
output = np.array([MAE, std, toc_train - tic_train, toc_test - tic_test])
np.savetxt("/home/users/sdeshpande/Deep_CNN/To_clusteroct/Txt_outputs/MST_topfacee_15_12.txt", output , delimiter=",")
np.savetxt("/home/users/sdeshpande/Deep_CNN/To_clusteroct/Predictions/topface_15_12.csv", data_predicted, delimiter=",")
####### Dump all the test predictions in a csv file #####
# Here I can even use the saved weights file to later get predictions from local server
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment