Skip to content
GitLab
Projects
Groups
Snippets
Help
Loading...
Help
What's new
7
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in
Toggle navigation
Open sidebar
Saurabh Deshpande
Deliverable_Rainbow
Compare Revisions
e4e2b3343d737fd29c98dced6f0b957c3c2e63b5...f6a2e2a5ee90c971f0730b36baad8271f74fdc99
Commits (2)
Changes in CNN.py
· f440ef64
Saurabh Deshpande
authored
Jan 22, 2021
f440ef64
Merge
https://gitlab.uni.lu/sdeshpande/deliverable_rainbow
· f6a2e2a5
Saurabh Deshpande
authored
Jan 22, 2021
f6a2e2a5
Show whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
2 additions
and
56 deletions
+2
-56
scripts/CNN_cantilever.py
scripts/CNN_cantilever.py
+2
-56
No files found.
scripts/CNN_cantilever.py
View file @
f6a2e2a5
...
@@ -18,27 +18,10 @@ from keras.callbacks import ModelCheckpoint, LearningRateScheduler
...
@@ -18,27 +18,10 @@ from keras.callbacks import ModelCheckpoint, LearningRateScheduler
# Pin NumPy's global RNG so every run draws the identical random
# sequence (weight initialisation, shuffling) — reproducibility.
RANDOM_SEED = 123
np.random.seed(RANDOM_SEED)
#def _get_available_gpus():
# """Get a list of available gpu devices (formatted as strings).
#
# # Returns
# A list of available GPU devices.
# """
# #global _LOCAL_DEVICES
# if tfback._LOCAL_DEVICES is None:
# devices = tf.config.list_logical_devices()
# tfback._LOCAL_DEVICES = [x.name for x in devices]
# return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]
#
#
#tfback._get_available_gpus = _get_available_gpus
#tfback._get_available_gpus()
#
##tf.config.experimental_list_devices()
#tf.config.list_logical_devices()
################ Let us import the data to train our ANN ###########################
# Training CSV is not uploaded to GitLab because of its heavy size.
# Headerless file with two columns named here: 'f' and 'u'
# (presumably force and displacement — TODO confirm against the data generator).
df = pd.read_csv(
    '/home/users/sdeshpande/Deep_CNN/CSV_files/topface_converged_nov.csv',
    names=['f', 'u'],
    header=None,
)
...
@@ -117,13 +100,9 @@ conv9 = Conv3D(64, (3,3, 3), activation='relu', padding='same',data_format='chan
...
@@ -117,13 +100,9 @@ conv9 = Conv3D(64, (3,3, 3), activation='relu', padding='same',data_format='chan
# Refinement stage: 64-filter 3x3x3 convolution with ReLU, 'same' padding so
# the spatial extent is preserved (channels-first NCDHW layout).
conv9 = Conv3D(
    64,
    (3, 3, 3),
    activation='relu',
    padding='same',
    data_format='channels_first',
)(conv9)
# Output head: 1x1x1 convolution collapses the feature maps to 3 channels
# with a linear activation (activation=None) — raw regression output.
conv9 = Conv3D(
    3,
    (1, 1, 1),
    activation=None,
    padding='same',
    data_format='channels_first',
)(conv9)
# Trim the padded border back to the physical domain: keep depth 2:30,
# height 2:14, width 2:14; batch and channel axes pass through untouched.
def _crop_borders(x):
    return x[:, :, 2:30, 2:14, 2:14]

conv9 = Lambda(_crop_borders)(conv9)
#conv9 = Conv3D(3, (5, 5, 5), activation= None, padding='valid',data_format='channels_first')(conv9)
# Assemble the functional model from the input tensor down to the cropped head.
UNET = Model(inputs=inputs, outputs=conv9)
# Load the best weights obtained previously (disabled for a fresh run):
#UNET.load_weights("saved_models/topfacelrs_25_09_2.h5")
#Let's use a decaying learning rate as per the below law
#Let's use a decaying learning rate as per the below law
def
lr_scheduler
(
epoch
,
lr
):
def
lr_scheduler
(
epoch
,
lr
):
k
=
0.001
k
=
0.001
...
@@ -170,39 +149,6 @@ plt.ylabel('Loss')
...
@@ -170,39 +149,6 @@ plt.ylabel('Loss')
# Finish the loss figure and write it to disk for the cluster run log.
plt.legend()
plt.savefig('/home/users/sdeshpande/Deep_CNN/To_clusteroct/plots/topfacee_15_12.png')
################# Save the Mean Absolute Error and export text file ############
# 12096 = 3 * 28 * 12 * 12, the flattened size of one (channels, D, H, W) sample.
n_flat = 12096
e = np.zeros((n_test, 1))                        # per-sample mean absolute error
data_predicted = np.zeros((n_flat * n_test, 3))  # cols: input, prediction, ground truth
for idx in range(n_test):
    # The network expects a leading batch axis: (1, 3, 28, 12, 12).
    pred = UNET.predict(X_test[idx].reshape(1, 3, 28, 12, 12)).reshape(n_flat)
    actual = Y_test[idx].reshape(n_flat)
    e[idx] = np.mean(abs(pred - actual))
    rows = slice(n_flat * idx, n_flat * (idx + 1))
    data_predicted[rows, 0] = X_test[idx].reshape(n_flat)
    data_predicted[rows, 1] = pred
    data_predicted[rows, 2] = actual
# Aggregate the per-sample errors over the whole test set.
MAE = e.mean()           # came out as 0.00219 on an earlier run
std = e.std(ddof=1)      # ddof=1 gives the unbiased sample estimate
# Time one forward pass to estimate single-prediction latency.
tic_test = time.time()
single_out = UNET.predict(X_test[0].reshape(1, 3, 28, 12, 12))
toc_test = time.time()

# Row of summary stats: [MAE, std, training time, single-prediction time].
output = np.array([MAE, std, toc_train - tic_train, toc_test - tic_test])
np.savetxt(
    "/home/users/sdeshpande/Deep_CNN/To_clusteroct/Txt_outputs/MST_topfacee_15_12.txt",
    output,
    delimiter=",",
)
####### Dump all the test predictions in a CSV file #####
np.savetxt(
    "/home/users/sdeshpande/Deep_CNN/To_clusteroct/Predictions/topface_15_12.csv",
    data_predicted,
    delimiter=",",
)
# NOTE: the saved weights file could later be reused to regenerate
# predictions from a local server instead of rerunning training.