# Assumed module-level imports (Keras 2.x API used throughout):
#   import copy
#   import numpy as np
#   from keras import backend as KBack
#   from keras.optimizers import Adadelta, Adam, Adagrad, RMSprop
#   from keras.callbacks import ReduceLROnPlateau, EarlyStopping
# CustomModelCheckpoint and contractive_loss are project-local helpers.


def fit(self, train_X, val_X, nb_epoch=50, batch_size=100,
        contractive=None, optimizer=None, lr=None, double=False):
    """Train the single or double autoencoder on (input, target) pairs."""
    if double:
        model = self._model_double
    else:
        model = self.autoencoder

    # Build the optimizer; fall back to the Keras default learning rate
    # when lr is None instead of passing lr=None through.
    if optimizer is None or optimizer.lower() == "adadelta":
        optimizer = Adadelta() if lr is None else Adadelta(lr=lr)
    elif optimizer.lower() == "adam":
        optimizer = Adam() if lr is None else Adam(lr=lr)
    elif optimizer.lower() == "adagrad":
        optimizer = Adagrad() if lr is None else Adagrad(lr=lr)

    if contractive:
        print('Using contractive loss, lambda: %s' % contractive)
        model.compile(optimizer=optimizer,
                      loss=contractive_loss(self, contractive))
    else:
        print('Using binary crossentropy')
        # Alternatives: kld, mse
        model.compile(optimizer=optimizer, loss='binary_crossentropy')

    history = model.fit(
        train_X[0], train_X[1],
        epochs=nb_epoch,
        batch_size=batch_size,
        shuffle=True,
        validation_data=(val_X[0], val_X[1]),
        callbacks=[
            ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                              patience=3, min_lr=0.01),
            EarlyStopping(monitor='val_loss', min_delta=1e-5,
                          patience=5, verbose=1, mode='auto'),
            CustomModelCheckpoint(self.encoder, self.save_model,
                                  monitor='val_loss',
                                  save_best_only=True, mode='auto')
        ]).history
    n_iters = len(history["loss"])
    return self, n_iters
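# Usage sketch (assumptions: `ae` is an instance of this class, X / X_val
# are NumPy arrays, and the model reconstructs its own input; every name
# below is hypothetical, not part of this module):
#
#   ae, n_iters = ae.fit(train_X=(X, X), val_X=(X_val, X_val),
#                        nb_epoch=50, batch_size=100,
#                        optimizer="adam", lr=1e-3)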
def fit_quadruple(self, model, train_data, val_split, nb_epoch=50,
                  batch_size=100, contractive=None, optimizer=None,
                  lr=None):
    def loss_structure(y_true, y_pred):
        '''Structural loss on pairs of embeddings.

        Hack: Keras loss functions only accept (y_true, y_pred), so the
        extra argument is smuggled in through y_true.
        y_pred: contains ys2 - ys1 (difference of embeddings)
        y_true: contains the ||yt2 - yt1||^2 structure-difference target
        '''
        min_batch_size = KBack.shape(y_true)[0]
        # sqrt(square(x)) == |x|: the absolute gap between the squared
        # embedding distance and the structural target.
        return KBack.sqrt(
            KBack.square(
                KBack.reshape(
                    KBack.sum(KBack.square(y_pred), axis=-1),
                    [min_batch_size, 1]) - y_true))

    train_data_temp = copy.copy(train_data)
    train_input = train_data[0]
    train_reconstructed = train_data[1]
    graph_vector_input = train_data_temp[2]
    # Target for loss_structure: squared L2 distance between the two
    # precomputed graph vectors of each node pair.
    graph_embedding = np.sum(
        np.square(graph_vector_input[0] - graph_vector_input[1]),
        axis=-1)
    train_output = train_reconstructed + [graph_embedding]

    # Build the optimizer; fall back to the Keras default learning rate
    # when lr is None.
    if optimizer is None or optimizer.lower() == "adadelta":
        optimizer = Adadelta() if lr is None else Adadelta(lr=lr)
    elif optimizer.lower() == "adam":
        optimizer = Adam() if lr is None else Adam(lr=lr)
    elif optimizer.lower() == "adagrad":
        optimizer = Adagrad() if lr is None else Adagrad(lr=lr)
    elif optimizer.lower() == "rmsprop":
        optimizer = RMSprop() if lr is None else RMSprop(lr=lr)

    if contractive:
        print('Using contractive loss, lambda: %s' % contractive)
        model.compile(optimizer=optimizer,
                      loss=contractive_loss(self, contractive))
    else:
        print('Using binary crossentropy')
        # Two reconstruction losses plus the structure loss, weighted
        # by self.alpha. Alternatives: kld, mse.
        model.compile(
            optimizer=optimizer,
            loss=['binary_crossentropy', 'binary_crossentropy',
                  loss_structure],
            loss_weights=[1., 1., self.alpha])

    history = model.fit(
        train_input, train_output,
        epochs=nb_epoch,
        batch_size=batch_size,
        shuffle=True,
        validation_split=val_split,
        callbacks=[
            # Double the patience used by fit(): three losses make
            # val_loss noisier.
            ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                              patience=3 * 2, min_lr=0.01),
            EarlyStopping(monitor='val_loss', min_delta=1e-5,
                          patience=5 * 2, verbose=1, mode='auto'),
            CustomModelCheckpoint(self.encoder, self.save_model,
                                  monitor='val_loss',
                                  save_best_only=True, mode='auto')
        ]).history
    return self, history
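# --- Illustration only (not part of the original module) -------------------
# A NumPy rendering of what loss_structure above computes per sample: the
# absolute gap between the squared embedding distance ||ys2 - ys1||^2 and
# the precomputed structural target ||yt2 - yt1||^2. The function name is
# hypothetical; it exists only to sanity-check the Keras version.
def _loss_structure_np(y_true, y_pred):
    import numpy as np
    # y_pred: (batch, d) embedding differences ys2 - ys1
    # y_true: (batch, 1) structural targets ||yt2 - yt1||^2
    emb_sq_dist = np.sum(np.square(y_pred), axis=-1).reshape(-1, 1)
    # sqrt(square(x)) == |x|
    return np.abs(emb_sq_dist - y_true)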