
Bugfixes part 3
AdrianAlan committed Jul 20, 2023
1 parent c935d0d commit 4d004ec
Showing 1 changed file with 67 additions and 30 deletions.
97 changes: 67 additions & 30 deletions part3/2023-CoDaS-HEP-Exercises-3.ipynb
@@ -28,6 +28,30 @@
"%matplotlib inline"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ffa6a8f8",
"metadata": {},
"outputs": [],
"source": [
"# If your are running on Google Collab, execute these lines as well\n",
"\n",
"from google.colab import drive\n",
"\n",
"# Mount GDrive\n",
"drive.mount('/content/drive')\n",
"\n",
"# Maker directory\n",
"%mkdir /content/drive/MyDrive/Github/\n",
"\n",
"# Change directory\n",
"%cd /content/drive/MyDrive/Github/\n",
"\n",
"# Clone the repository\n",
"!git clone https://github.com/AdrianAlan/codas-hep-intro-ml-2023.git"
]
},
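If the relative data paths used later (`../Data-MLtutorial/...`) are meant to resolve from inside the cloned repository, a follow-up step along these lines may be needed on Colab. This is only a sketch; the folder layout is an assumption based on the notebook's path in this commit, not something the commit itself specifies:

# Hypothetical follow-up for Colab: move into the notebook's folder inside the
# cloned repository so that relative paths such as '../Data-MLtutorial/...' resolve.
# The 'part3' subfolder is assumed from this notebook's location in the repository.
%cd codas-hep-intro-ml-2023/part3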
{
"cell_type": "markdown",
"id": "12013223",
@@ -301,11 +325,10 @@
"target = np.array([])\n",
"inputs = np.array([])\n",
"\n",
"datafiles = ['Data-MLtutorial/JetDataset/jetImage_7_100p_30000_40000.h5',\n",
" 'Data-MLtutorial/JetDataset/jetImage_7_100p_60000_70000.h5',\n",
" 'Data-MLtutorial/JetDataset/jetImage_7_100p_50000_60000.h5',\n",
" 'Data-MLtutorial/JetDataset/jetImage_7_100p_10000_20000.h5',\n",
" 'Data-MLtutorial/JetDataset/jetImage_7_100p_0_10000.h5']\n",
"datafiles = ['../Data-MLtutorial/JetDataset/jetImage_7_100p_30000_40000.h5',\n",
" '../Data-MLtutorial/JetDataset/jetImage_7_100p_50000_60000.h5',\n",
" '../Data-MLtutorial/JetDataset/jetImage_7_100p_10000_20000.h5',\n",
" '../Data-MLtutorial/JetDataset/jetImage_7_100p_0_10000.h5']\n",
"\n",
"for file_ in datafiles:\n",
" with h5py.File(file_, 'r') as f:\n",
@@ -417,7 +440,7 @@
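The body of the loading loop above is collapsed in this diff. As an optional sanity check, one can list what each HDF5 file actually contains before the loop fills `inputs` and `target`; this sketch makes no assumption about the dataset names inside the files:

# Sketch: inspect the datasets stored in the first file (names, shapes, dtypes)
import h5py

with h5py.File(datafiles[0], 'r') as f:
    for name in f.keys():
        print(name, f[name])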
"# Keras imports\n",
"\n",
"from tensorflow.keras.models import Model, model_from_json\n",
"from tensorflow.keras.layers import Dense, Input, Conv2D, Dropout, Flatten\n",
"from tensorflow.keras.layers import Dense, Input, Conv2D, Dropout, Flatten, BatchNormalization\n",
"from tensorflow.keras.layers import MaxPooling2D, Activation"
]
},
@@ -432,10 +455,12 @@
"\n",
"input_ = Input(shape=(100, 100, 1))\n",
"x = Conv2D(5, kernel_size=(5, 5), data_format=\"channels_last\", strides=(1, 1), padding=\"same\")(input_)\n",
"x = BatchNormalization()(x)\n",
"x = Activation('relu')(x)\n",
"x = MaxPooling2D(pool_size = (5, 5))(x)\n",
"x = Dropout(0.25)(x)\n",
"x = Conv2D(3, kernel_size=(3, 3), data_format=\"channels_last\", strides=(1, 1), padding=\"same\")(x)\n",
"x = BatchNormalization()(x)\n",
"x = Activation('relu')(x)\n",
"x = MaxPooling2D( pool_size = (3,3))(x)\n",
"x = Dropout(0.25)(x)\n",
@@ -475,22 +500,32 @@
"metadata": {},
"outputs": [],
"source": [
"EPOCHS = 1\n",
"EPOCHS = 20\n",
"\n",
"history = model.fit(\n",
" X_train,\n",
" y_train,\n",
" epochs=20,\n",
" epochs=EPOCHS,\n",
" batch_size=32,\n",
" verbose = 2,\n",
" verbose=2,\n",
" validation_data=(X_val, y_val)\n",
")\n",
")"
]
},
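A quick way to see whether the longer 20-epoch run converges is to plot the learning curves from `history`; this sketch assumes `matplotlib.pyplot` is available as `plt`, consistent with the `%matplotlib inline` setup above:

# Sketch: training vs. validation loss per epoch
import matplotlib.pyplot as plt

plt.plot(history.history['loss'], label='training')
plt.plot(history.history['val_loss'], label='validation')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()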
{
"cell_type": "code",
"execution_count": null,
"id": "19b50b63",
"metadata": {},
"outputs": [],
"source": [
"# Save the trained model\n",
"\n",
"with open(\"Data-MLtutorial/jetTagger_CNN.json\", \"w\") as json_file:\n",
"with open(\"../Data-MLtutorial/jetTagger_CNN.json\", \"w\") as json_file:\n",
" json_file.write(model.to_json())\n",
"model.save_weights(\"Data-MLtutorial/jetTagger_CNN.h5\")\n",
"model.save_weights(\"../Data-MLtutorial/jetTagger_CNN.h5\")\n",
"\n",
"with open('Data-MLtutorial/history.h5', 'wb') as f:\n",
"with open('../Data-MLtutorial/history.h5', 'wb') as f:\n",
" pickle.dump(history.history, f, protocol=pickle.HIGHEST_PROTOCOL)"
]
},
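Since `model_from_json` is already imported above, reloading the tagger later mirrors the save step; a minimal sketch using the same paths:

# Sketch: restore the architecture from JSON, then load the trained weights
with open("../Data-MLtutorial/jetTagger_CNN.json", "r") as json_file:
    reloaded_model = model_from_json(json_file.read())
reloaded_model.load_weights("../Data-MLtutorial/jetTagger_CNN.h5")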
@@ -603,12 +638,10 @@
"source": [
"# Encoder\n",
"enc_input = Input(shape=(28, 28, 1), name='encoder_input')\n",
"x = Conv2D(64, 5, padding='same', activation='relu')(enc_input)\n",
"x = Conv2D(16, 3, padding='same', strides=2, activation='relu')(x)\n",
"x = Conv2D(8, 3, padding='same', activation='relu')(x)\n",
"x = Conv2D(6, 3, padding='same', activation='relu')(x)\n",
"x = Conv2D(32, 3, padding='same', strides=2, activation='relu')(enc_input)\n",
"x = Conv2D(64, 3, padding='same', strides=2, activation='relu')(x)\n",
"x = Flatten()(x)\n",
"x = Dense(8)(x)\n",
"x = Dense(16)(x)\n",
"\n",
"# Latent space\n",
"latent_dim = 2\n",
@@ … @@
"# Decoder\n",
"dec_input = Input(shape=(latent_dim,), name='decoder_input')\n",
"\n",
"y = Dense(1176)(dec_input)\n",
"y = Reshape(target_shape=(14, 14, 6))(y)\n",
"y = Conv2DTranspose(6, 3, padding='same', activation='relu')(y)\n",
"y = Conv2DTranspose(8, 3, padding='same', activation='relu')(y)\n",
"y = Conv2DTranspose(16, 3, strides=2, padding='same', activation='relu')(y)\n",
"y = Conv2DTranspose(32, 5, padding='same', activation='relu')(y)\n",
"y = Conv2DTranspose(1, 5, padding='same', activation='relu')(y)\n",
"y = Dense(7 * 7 * 64)(dec_input)\n",
"y = Reshape(target_shape=(7, 7, 64))(y)\n",
"y = Conv2DTranspose(64, 3, padding='same', strides=2, activation='relu')(y)\n",
"y = Conv2DTranspose(32, 3, padding='same', strides=2, activation='relu')(y)\n",
"y = Conv2DTranspose(1, 3, padding='same', activation='sigmoid')(y)\n",
"\n",
"decoder = Model(dec_input, y, name='decoder')\n",
"decoder.summary()"
@@ … @@
"def loss_func(z_mean, z_logvar):\n",
"\n",
" def vae_reconstruction_loss(y_true, y_pred):\n",
" return K.sum(K.square(y_true - y_pred), axis=[1, 2, 3])\n",
" return K.mean(K.sum(K.square(y_true - y_pred), axis=[1, 2]))\n",
"\n",
" def vae_kl_loss(z_mean, z_logvar):\n",
" return -0.5 * K.sum(1.0 + z_logvar - K.square(z_mean) - K.exp(z_logvar), axis=1)\n",
" return -0.5 * K.mean(1.0 + z_logvar - K.square(z_mean) - K.exp(z_logvar), axis=1)\n",
"\n",
" def vae_loss(y_true, y_predict, beta=4):\n",
" def vae_loss(y_true, y_predict, beta=1):\n",
" reconstruction_loss = vae_reconstruction_loss(y_true, y_predict)\n",
" kl_loss = vae_kl_loss(y_true, y_predict)\n",
" return reconstruction_loss + beta * kl_loss\n",
@@ -697,7 +728,7 @@
"vae = Model(enc_input, dec_output, name='VAE')\n",
"vae.summary()\n",
"\n",
"opt = tensorflow.keras.optimizers.Adam(learning_rate=0.0001)\n",
"opt = tensorflow.keras.optimizers.Adam()\n",
"vae.compile(optimizer=opt, loss=loss_func(z_mean, z_logvar))"
]
},
@@ -736,7 +767,13 @@
"source": [
"# Train VAE\n",
"\n",
"history = vae.fit(X_train, X_train, epochs=20, batch_size=32, validation_data=(X_test, X_test))"
"history = vae.fit(\n",
" X_train,\n",
" X_train,\n",
" epochs=30,\n",
" batch_size=128,\n",
" validation_data=(X_test, X_test)\n",
" )"
]
},
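Once trained, the decoder on its own acts as a generator; a short sketch, assuming `numpy` is available as `np` as in the earlier cells:

# Sketch: decode points drawn from the standard normal prior into new images
z_samples = np.random.normal(size=(16, latent_dim))
generated = decoder.predict(z_samples)   # expected shape: (16, 28, 28, 1)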
{
