Example #1
patches_val_aug, patches_val_ref_aug = bal_aug_patches(percent, patch_size,
                                                       patches_val,
                                                       patches_val_ref)
patches_val_ref_aug_h = tf.keras.utils.to_categorical(patches_val_ref_aug,
                                                      number_class)

#%%
start_time = time.time()
exp = 1
rows = patch_size
cols = patch_size
adam = Adam(learning_rate=0.0001, beta_1=0.9)
batch_size = 8

weights = [0.5, 0.5, 0]
loss = weighted_categorical_crossentropy(weights)
model = unet((rows, cols, channels))
model.compile(optimizer=adam, loss=loss, metrics=['accuracy'])
# print model information
model.summary()
filepath = 'models/'
# define early stopping callback
earlystop = EarlyStopping(monitor='val_loss',
                          min_delta=0.0001,
                          patience=10,
                          verbose=1,
                          mode='min')
checkpoint = ModelCheckpoint(filepath + 'unet_exp_' + str(exp) + '.h5',
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True)
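Every example on this page passes a `weights` vector to `weighted_categorical_crossentropy`, but none shows the function's body. Below is a minimal sketch of the usual per-class weighting formulation; each repository's own implementation may differ in details such as normalization.

from tensorflow.keras import backend as K

def weighted_categorical_crossentropy(weights):
    """Categorical cross-entropy with a fixed weight per class.

    `weights` has one entry per class, e.g. [0.5, 0.5, 0] to ignore the
    third class. This is a sketch of a common formulation, not the exact
    code used by the repositories shown here.
    """
    weights = K.constant(weights, dtype='float32')

    def loss(y_true, y_pred):
        # Normalize predictions so each sample's class probabilities sum to 1.
        y_pred = y_pred / K.sum(y_pred, axis=-1, keepdims=True)
        # Clip to avoid log(0).
        y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
        # Weighted negative log-likelihood, summed over the class axis.
        return -K.sum(y_true * K.log(y_pred) * weights, axis=-1)

    return loss

With the weights from Example #1, `weighted_categorical_crossentropy([0.5, 0.5, 0])` returns a callable that Keras accepts directly in `model.compile(loss=...)`.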
Example #2
if conf.unetxst_homographies is not None:
    uNetXSTHomographies = utils.load_module(conf.unetxst_homographies)
    model = architecture.get_network(
        (conf.image_shape[0], conf.image_shape[1], n_classes_input),
        n_classes_label,
        n_inputs=n_inputs,
        thetas=uNetXSTHomographies.H)
else:
    model = architecture.get_network(
        (conf.image_shape[0], conf.image_shape[1], n_classes_input),
        n_classes_label)
if conf.model_weights is not None:
    model.load_weights(conf.model_weights)
optimizer = tf.keras.optimizers.Adam(learning_rate=conf.learning_rate)
if conf.loss_weights is not None:
    loss = utils.weighted_categorical_crossentropy(conf.loss_weights)
else:
    loss = tf.keras.losses.CategoricalCrossentropy()
metrics = [
    tf.keras.metrics.CategoricalAccuracy(),
    utils.MeanIoUWithOneHotLabels(num_classes=n_classes_label)
]
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
print(f"Compiled model {os.path.basename(conf.model)}")

# create output directories
model_output_dir = os.path.join(conf.output_dir,
                                datetime.now().strftime("%Y-%m-%d-%H-%M-%S"))
tensorboard_dir = os.path.join(model_output_dir, "TensorBoard")
checkpoint_dir = os.path.join(model_output_dir, "Checkpoints")
if not os.path.exists(tensorboard_dir):
    os.makedirs(tensorboard_dir)
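The snippet does not show `utils.MeanIoUWithOneHotLabels`. Keras' built-in `MeanIoU` expects integer class indices, so a typical adaptation (offered here as an assumption about what that utility does) converts the one-hot tensors with `argmax` before updating the confusion matrix:

import tensorflow as tf

class MeanIoUWithOneHotLabels(tf.keras.metrics.MeanIoU):
    """Mean IoU that accepts one-hot labels and softmax predictions."""

    def update_state(self, y_true, y_pred, sample_weight=None):
        # Convert one-hot / probability tensors to dense class indices.
        y_true = tf.argmax(y_true, axis=-1)
        y_pred = tf.argmax(y_pred, axis=-1)
        return super().update_state(y_true, y_pred, sample_weight)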
Example #3
# POS Tags to numerical sequences
pos_tokenizer = Tokenizer()
pos_tokenizer.fit_on_texts(z_pos)
pos_sequences = pos_tokenizer.texts_to_sequences(z_pos)
z_test = to_categorical(pos_sequences)

# TODO: these class weights should presumably come from the training set
class_weights = list(
    utils.get_class_weights(c_test.label_list, WEIGHT_SMOOTHING).values())
print('loss_weight {}'.format(class_weights))

# Load model and Embeddings
model = load_model('naacl_metaphor.h5',
                   custom_objects={
                       'loss':
                       utils.weighted_categorical_crossentropy(class_weights),
                       'f1':
                       utils.f1,
                       'precision':
                       utils.precision,
                       'recall':
                       utils.recall
                   })

# Generate list of label predictions for each sentence
float_predictions = model.predict(x_test, batch_size=KERAS_BATCH_SIZE)
binary_predictions = kerasbackend.argmax(float_predictions)
label_predictions = kerasbackend.eval(binary_predictions)

# Write prediction to CSV file
predictions_file = 'predictions.csv'
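The `load_model` call above must supply the same custom objects the model was trained with. The `utils.f1`, `utils.precision`, and `utils.recall` helpers are not shown; a widely used Keras-backend formulation is sketched below as an assumption about what they compute (the repository's own utils may differ).

from tensorflow.keras import backend as K

def precision(y_true, y_pred):
    # Fraction of predicted positives that are correct.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())

def recall(y_true, y_pred):
    # Fraction of actual positives that were recovered.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())

def f1(y_true, y_pred):
    # Harmonic mean of precision and recall.
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    return 2 * p * r / (p + r + K.epsilon())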
Example #4

def generate_generator_multiple():
    # Draw a batch from each image generator; both branches share the same labels.
    while True:
        T1, lbl = next(T1_generator)
        T2, demo = next(T2_generator)
        yield [T1, T2, demo], lbl  # yield both images plus demographics, with their shared label

model1 = ResNet50()
model2 = ResNet50()

# Fuse the two image branches and the demographic features into one classifier.
model = bothIms_and_Demo(model1, model2, dr=0.5, nb_classes=2, kr=kr, ar=ar, nd=10)

optim = SGD(learning_rate=0.001, decay=1e-6, momentum=0.9, nesterov=True)

# Inverse-frequency class weights, rescaled to a fixed Euclidean norm.
weights = 1 / hist_type(np.array(tstLs + trnLs))
weights = weights / np.linalg.norm(weights) * np.sqrt(len(weights))
model.compile(optimizer=optim,
              loss=weighted_categorical_crossentropy(weights),
              metrics=['accuracy'])

chknm = '../h5/' + sv_root + 'mult' + strftime("%H_%M_%S", gmtime())
callbacks = [
    ModelCheckpoint(chknm, monitor='val_loss', save_best_only=True, verbose=0),
    ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=40, min_lr=1e-9,
                      min_delta=0.00001, verbose=1, mode='min'),
    EarlyStopping(monitor='val_loss', min_delta=0, patience=300, verbose=1),
]

history = model.fit(
    generate_generator_multiple(),
    epochs=10000,
    steps_per_epoch=int(np.ceil(len(trnDs) / 32)),
    validation_data=([T1_val, T2_val, np.stack(valDs, axis=0)], y_val),
    verbose=2,
    callbacks=callbacks)
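The weights in Example #4 are inverse label frequencies rescaled so their Euclidean norm equals the square root of the number of classes. A small worked example of that normalization, using a made-up two-class histogram:

import numpy as np

counts = np.array([900.0, 100.0])   # hypothetical label histogram: 900 vs. 100 samples

weights = 1.0 / counts                                           # inverse frequency
weights = weights / np.linalg.norm(weights) * np.sqrt(len(weights))

print(weights)   # approx. [0.156, 1.406]; the rarer class gets the larger weight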
Example #5
    def train(self,
              source,
              target,
              out_dir,
              epochs=100,
              batch_size=32,
              dense=False,
              log_name='log.csv',
              model_name='sparse_unet'):

        print("Num GPUs Available: ",
              len(tf.config.list_physical_devices('GPU')))

        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

        if not os.path.exists(os.path.join(out_dir, 'ckpt')):
            os.makedirs(os.path.join(out_dir, 'ckpt'))

        train_generator = SparseTiffDataGenerator(source,
                                                  target,
                                                  batch_size=batch_size,
                                                  shape=self.shape,
                                                  dense=dense,
                                                  augment=False)

        val_generator = SparseTiffDataGenerator(source,
                                                target,
                                                batch_size=batch_size,
                                                shape=self.shape,
                                                dense=dense,
                                                augment=False,
                                                is_val=True)

        sample_batch = val_generator[0][0]
        sample_img = SampleImageCallback(self.model,
                                         sample_batch,
                                         out_dir,
                                         save=True)

        weight_zero, weight_one, weight_two = train_generator.batch_weights()

        optim = Adam(learning_rate=0.001)

        self.model.compile(optimizer=optim,
                           loss=weighted_categorical_crossentropy(
                               np.array([weight_zero, weight_one,
                                         weight_two])),
                           metrics=[dice_coefficient])

        self.model.summary()

        # self.model.load_weights(model_name)

        csv_logger = CSVLogger(os.path.join(out_dir, log_name))

        ckpt_name = 'ckpt/' + model_name + '_epoch_{epoch:02d}_val_loss_{val_loss:.4f}.hdf5'

        model_ckpt = ModelCheckpoint(os.path.join(out_dir, ckpt_name),
                                     verbose=1,
                                     save_best_only=False,
                                     save_weights_only=True)

        self.model.fit(train_generator,
                       validation_data=val_generator,
                       validation_steps=math.floor(len(val_generator) /
                                                   batch_size),
                       epochs=epochs,
                       shuffle=False,
                       callbacks=[csv_logger, model_ckpt, sample_img])
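Example #5 tracks a `dice_coefficient` metric alongside the weighted loss, but its definition is not part of the snippet. A common soft-Dice formulation is sketched below as an assumption; the repository's version may use a different smoothing term.

from tensorflow.keras import backend as K

def dice_coefficient(y_true, y_pred, smooth=1.0):
    # Soft Dice over the flattened one-hot masks; `smooth` avoids division by zero.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)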
Example #6
        X_test = dataset[keys]['X']
        y_test = dataset[keys]['y']

for f in os.listdir(models_folder):
    if f.endswith('.h5'):
        model_name = models_folder + f

'''
hf = h5py.File(data_folder + 'dataset.h5', 'r')

# Load datasets and true outputs
X_test = hf[('X_test')]
y_test = hf[('y_test')]
'''
# Load model
model = keras.models.load_model(model_name,
                                custom_objects={'loss': weighted_categorical_crossentropy(weights)})
likelihood = model.predict(X_test[:, :, 2:])

# Join events together from the same time series

# Add time and uid to the likelihood array
time = (X_test[:, :, 1])[:, np.newaxis]
time = np.swapaxes(time, 1, -1)

uid = (X_test[:, :, 0])[:, np.newaxis]
uid = np.swapaxes(uid, 1, -1)

y_est = np.concatenate((uid, time, likelihood), -1)

max_time = int(max(y_est[-1, :, 1]) * 1000)  # in ms
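Example #6 only uses the model for inference, yet it must still pass the weighted loss through `custom_objects` because `load_model` recompiles the model by default. When the original loss weights are unavailable, passing `compile=False` avoids that requirement. A minimal sketch for a single-input model, with a placeholder path and dummy input:

import numpy as np
import tensorflow as tf

# Placeholder path; compile=False skips loss/optimizer deserialization, so no
# custom_objects entry for weighted_categorical_crossentropy is needed.
model = tf.keras.models.load_model('model.h5', compile=False)

# Dummy batch matching the model's input shape (batch dimension replaced by 1).
dummy_batch = np.zeros((1,) + tuple(model.input_shape[1:]), dtype=np.float32)
likelihood = model.predict(dummy_batch)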