import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Project-local imports; module paths assumed from the repository layout.
from master_scripts.classes import Experiment
from master_scripts.data_functions import get_tf_device

# config, images, energies, and double_indices are assumed to be
# prepared earlier in the script (see the sketch after this script).

# Log-scale the images if desirable. With scaling set to "minmax" the
# log branch is skipped; a scaling string containing "np.log" enables it.
config['scaling'] = "minmax"
if "np.log" in config['scaling']:
    images = np.log1p(images)

# Set tf random seed
tf.random.set_seed(config['random_seed'])

# ================== Define Model ==================
with tf.device(get_tf_device(20)):
    model = Sequential()
    model.add(Dense(2, activation='relu', input_shape=(256,)))
    model.compile(
        loss='mse',
        optimizer='adam',
    )
    print(model.summary())

    # Run experiment
    experiment = Experiment(
        model=model,
        config=config,
        model_type="regression",
        experiment_name="generate_results_energies_double_linreg",
    )
    experiment.run_kfold(
        images[double_indices],
        energies[double_indices],
    )
    experiment.save(save_model=True, save_indices=False)
    print("Finished experiment:", experiment.id)
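# ================== Data preparation (sketch) ==================
# A minimal sketch of the data preparation assumed by the script above.
# The .npy file names and the (n, 256) flattened 16x16 image layout are
# illustrative assumptions, not the repository's actual pipeline.
import numpy as np

images = np.load("data/images.npy")      # hypothetical path; shape (n, 256)
energies = np.load("data/energies.npy")  # hypothetical path; shape (n, 2)
# Assumption: double events carry a nonzero energy in the second column.
double_indices = np.flatnonzero(energies[:, 1] > 0)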
# ================== Classification: batch size search ==================
import json

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D

# Project-local imports; module paths assumed from the repository layout.
from master_scripts.classes import Experiment
from master_scripts.data_functions import get_git_root, normalize_image_data

# config, images, labels, train_idx, val_idx, search_name, and the
# batch_sizes grid are assumed to be defined earlier in the search script.
# Images are assumed reshaped to (n, 16, 16, 1) for the CNN.

id_param = {}
# Loop over candidate batch sizes (reconstructed from the id_param
# bookkeeping and the per-experiment b_size entries below).
for b_size in batch_sizes:
    config['batch_size'] = b_size  # assumed: Experiment reads batch size from config

    model = Sequential()
    model.add(Conv2D(64, (3, 3), activation='relu', input_shape=(16, 16, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(
        optimizer='adam',
        loss='binary_crossentropy',
        metrics=['accuracy'],
    )

    # Run experiment
    experiment = Experiment(
        model=model,
        config=config,
        model_type="classification",
        experiment_name=search_name,
    )
    experiment.run(
        normalize_image_data(images[train_idx]),
        labels[train_idx],
        normalize_image_data(images[val_idx]),
        labels[val_idx],
    )
    experiment.save()
    id_param[experiment.id] = {
        'batch_size': b_size,
    }

# Persist the experiment-id -> hyperparameter mapping for this search.
search_path = get_git_root() + "experiments/searches/"
with open(search_path + search_name + ".json", "w") as fp:
    json.dump(id_param, fp, indent=2)
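# ================== Reading back the search results (sketch) ==================
# A short usage sketch: load the JSON written above and list the batch
# size tried in each experiment. search_name is illustrative here;
# get_git_root is the same project helper used above.
import json

search_name = "cnn_batch_size_search"  # illustrative
search_path = get_git_root() + "experiments/searches/"
with open(search_path + search_name + ".json") as fp:
    id_param = json.load(fp)

for experiment_id, params in id_param.items():
    print(experiment_id, "batch_size:", params['batch_size'])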