def train(self, batch_size, epoches, out_name, mini_data):
    print(mini_data)

    # learning rate schedule: halve the rate every 7 epochs
    def step_decay(epoch):
        initial_lrate = 1e-3
        drop = 0.5
        epochs_drop = 7.0
        lrate = initial_lrate * math.pow(
            drop, math.floor((1 + epoch) / epochs_drop))
        return lrate

    train_dataset = DataGen(self.IMG_SIZE, 5, True, mini=mini_data)
    train_gen = train_dataset.generator(batch_size, True)

    TIMESTAMP = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.datetime.now())
    # NOTE: EarlyStopping monitors val_loss, but fit_generator below receives
    # no validation_data, so val_loss is only available if EvalCallBack
    # injects it into the logs.
    callbacks_list = [
        EvalCallBack(out_name),
        EarlyStopping(monitor='val_loss', mode='min', patience=6),
        TensorBoard(log_dir='logs/' + TIMESTAMP, batch_size=batch_size,
                    update_freq='epoch'),
        LearningRateScheduler(step_decay)
    ]

    self.model.compile(optimizer='adam', loss='categorical_crossentropy',
                       metrics=['categorical_accuracy'])
    self.model.fit_generator(
        generator=train_gen,
        steps_per_epoch=train_dataset.get_dataset_size() // batch_size,
        epochs=epoches,
        callbacks=callbacks_list)

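# The step_decay schedule above halves the learning rate every 7 epochs:
# epochs 0-5 run at 1e-3, 6-12 at 5e-4, 13-19 at 2.5e-4, and so on. A
# minimal self-contained sketch, reusing the same constants, that prints
# the schedule so the drop points are easy to verify:
import math

def show_step_decay(last_epoch=20):
    for epoch in range(last_epoch + 1):
        lrate = 1e-3 * math.pow(0.5, math.floor((1 + epoch) / 7.0))
        print(epoch, lrate)

# show_step_decay()  # epoch 6 is the first at 5e-4, epoch 13 the first at 2.5e-4
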
def train(self, batch_size, model_path, epochs):
    train_dataset = MPIIDataGen('../../data/mpii/mpii_annotations.json',
                                '../../data/mpii/images',
                                inres=self.inres, outres=self.outres,
                                is_train=True)
    train_gen = train_dataset.generator(batch_size=batch_size,
                                        num_stack=self.num_stacks,
                                        sigma=1, is_shuffle=True,
                                        rot_flag=True, scale_flag=True,
                                        flip_flag=True)

    csvlogger = CSVLogger(
        os.path.join(
            model_path,
            "csv_train_" + str(datetime.datetime.now().strftime('%H:%M')) + ".csv"))
    checkpoint = EvalCallBack(model_path, self.inres, self.outres)
    xcallbacks = [csvlogger, checkpoint]

    self.model.fit_generator(
        generator=train_gen,
        steps_per_epoch=train_dataset.get_dataset_size() // batch_size,
        epochs=epochs,
        callbacks=xcallbacks)

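# None of the variants in this file show their imports. A hedged
# reconstruction of what the method bodies rely on (Keras 2.x module layout
# assumed; EvalCallBack, MPIIDataGen, NYUHandDataGen, DataGen and
# config_reader are project-local modules whose import paths are not shown
# in the source):
import datetime
import math
import os

from keras.callbacks import (CSVLogger, EarlyStopping, LearningRateScheduler,
                             ReduceLROnPlateau, TensorBoard)
from keras.losses import mean_squared_error
from keras.optimizers import Adam, RMSprop
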
def train(self, batch_size, model_path, epochs):
    train_dataset = MPIIDataGen(
        "/home/mike/Documents/stacked_hourglass_tf2/data/mpii/mpii_annotations.json",
        "/home/mike/datasets/mpii_human_pose_v1/images",
        inres=self.inres, outres=self.outres, is_train=True)
    train_gen = train_dataset.generator(batch_size, self.num_stacks, sigma=1,
                                        is_shuffle=True, rot_flag=True,
                                        scale_flag=True, flip_flag=True)

    # TypeError: expected str, bytes or os.PathLike object, not NoneType
    csvlogger = CSVLogger(
        os.path.join(
            model_path,
            "csv_train_" + str(datetime.datetime.now().strftime('%H:%M')) + ".csv"))
    # modelfile is built here but never used
    modelfile = os.path.join(model_path, 'weights_{epoch:02d}_{loss:.2f}.hdf5')
    checkpoint = EvalCallBack(model_path, self.inres, self.outres)
    xcallbacks = [csvlogger, checkpoint]

    # ValueError: Failed to find data adapter that can handle input:
    # <class 'NoneType'>, <class 'NoneType'>
    self.model.fit_generator(
        generator=train_gen,
        steps_per_epoch=train_dataset.get_dataset_size() // batch_size,
        epochs=epochs,
        callbacks=xcallbacks)

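# The two exception comments in the variant above point at None values
# leaking in: os.path.join raises the TypeError when model_path is None, and
# Keras typically reports the "data adapter" ValueError when the object
# passed as the generator is itself None (e.g. a generator() that forgot to
# return or yield). A minimal guard sketch for the first case; the fallback
# directory name is a hypothetical example, not part of the original code:
import os

def ensure_model_path(model_path, fallback='trained_models'):
    # Substitute a default directory and make sure it exists on disk.
    if model_path is None:
        model_path = fallback
    os.makedirs(model_path, exist_ok=True)
    return model_path
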
def resume_train(self, batch_size, model_json, model_weights, init_epoch, epochs):
    self.load_model(model_json, model_weights)
    self.model.compile(optimizer=Adam(lr=5e-2), loss=mean_squared_error,
                       metrics=["accuracy"])

    # dataset_path = os.path.join('D:\\', 'nyu_croped')
    # dataset_path = '/home/tomas_bordac/nyu_croped'
    dataset_path = config_reader.load_path()
    train_dataset = NYUHandDataGen('joint_data.mat', dataset_path,
                                   inres=self.inres, outres=self.outres,
                                   is_train=True, is_testtrain=True)
    train_gen = train_dataset.generator(batch_size, self.num_stacks, sigma=3,
                                        is_shuffle=True)

    model_dir = os.path.dirname(os.path.abspath(model_json))
    print(model_dir, model_json)

    csvlogger = CSVLogger(
        os.path.join(
            model_dir,
            "csv_train_" + str(datetime.datetime.now().strftime('%H:%M')) + ".csv"))
    checkpoint = EvalCallBack(model_dir, self.inres, self.outres)
    lr_reducer = ReduceLROnPlateau(monitor='loss', factor=0.8, patience=3,
                                   verbose=1, cooldown=2, mode='auto')
    xcallbacks = [csvlogger, checkpoint, lr_reducer]

    self.model.fit_generator(
        generator=train_gen,
        # x4: four passes over the dataset per reported epoch
        steps_per_epoch=(train_dataset.get_dataset_size() // batch_size) * 4,
        initial_epoch=init_epoch,
        epochs=epochs,
        callbacks=xcallbacks)

def train(self, batch_size, model_path, epochs):
    train_dataset = MPIIDataGen("../../data/mpii/mpii_annotations.json",
                                "../../data/mpii/images",
                                inres=self.inres, outres=self.outres,
                                is_train=True)
    train_gen = train_dataset.generator(batch_size, self.num_stacks, sigma=1,
                                        is_shuffle=True, rot_flag=True,
                                        scale_flag=True, flip_flag=True)

    csvlogger = CSVLogger(
        os.path.join(
            model_path,
            "csv_train_" + str(datetime.datetime.now().strftime('%H:%M')) + ".csv"))
    modelfile = os.path.join(model_path, 'weights_{epoch:02d}_{loss:.2f}.hdf5')
    checkpoint = EvalCallBack(model_path)
    xcallbacks = [csvlogger, checkpoint]

    self.model.fit_generator(
        generator=train_gen,
        steps_per_epoch=train_dataset.get_dataset_size() // batch_size,
        # validation_data=val_gen,
        # validation_steps=val_dataset.get_dataset_size() // batch_size,
        epochs=epochs,
        callbacks=xcallbacks)

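# The commented-out validation arguments above (and the 'val_loss' monitors
# in the EarlyStopping callbacks elsewhere in this file) only take effect if
# a validation generator is actually passed. A hedged sketch of the wiring,
# reusing MPIIDataGen with is_train=False; the argument values are
# assumptions for illustration, not taken from the source:
# val_dataset = MPIIDataGen("../../data/mpii/mpii_annotations.json",
#                           "../../data/mpii/images",
#                           inres=self.inres, outres=self.outres,
#                           is_train=False)
# val_gen = val_dataset.generator(batch_size, self.num_stacks, sigma=1,
#                                 is_shuffle=False)
# self.model.fit_generator(generator=train_gen, ...,
#                          validation_data=val_gen,
#                          validation_steps=val_dataset.get_dataset_size() // batch_size)
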
def resume_train(self, batch_size, model_json, model_weights, init_epoch, epochs):
    self.load_model(model_json, model_weights)
    self.model.compile(optimizer=RMSprop(lr=5e-4), loss=mean_squared_error,
                       metrics=["accuracy"])

    train_dataset = MPIIDataGen("../../data/mpii/mpii_annotations.json",
                                "../../data/mpii/images",
                                inres=self.inres, outres=self.outres,
                                is_train=True)
    train_gen = train_dataset.generator(batch_size, self.num_stacks, sigma=1,
                                        is_shuffle=True, rot_flag=True,
                                        scale_flag=True, flip_flag=True)

    model_dir = os.path.dirname(os.path.abspath(model_json))
    print(model_dir, model_json)

    csvlogger = CSVLogger(
        os.path.join(
            model_dir,
            "csv_train_" + str(datetime.datetime.now().strftime('%H:%M')) + ".csv"))
    checkpoint = EvalCallBack(model_dir)
    xcallbacks = [csvlogger, checkpoint]

    self.model.fit_generator(
        generator=train_gen,
        steps_per_epoch=train_dataset.get_dataset_size() // batch_size,
        initial_epoch=init_epoch,
        epochs=epochs,
        callbacks=xcallbacks)

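# A hypothetical call for the resume flow above; the object name, file names
# and hyperparameters are illustrative only, not taken from the source:
# net.resume_train(batch_size=8,
#                  model_json='trained_models/net_arch.json',
#                  model_weights='trained_models/weights_20_0.05.hdf5',
#                  init_epoch=20, epochs=40)
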
def train(self, batch_size, model_path, epochs):
    # dataset_path = os.path.join('D:\\', 'nyu_croped')
    # dataset_path = '/home/tomas_bordac/nyu_croped'
    # dataset_path = '../../data/nyu_croped/'
    dataset_path = config_reader.load_path()
    train_dataset = NYUHandDataGen('joint_data.mat', dataset_path,
                                   inres=self.inres, outres=self.outres,
                                   is_train=True, is_testtrain=True)
    train_gen = train_dataset.generator(batch_size, self.num_stacks, sigma=3,
                                        is_shuffle=True)

    csvlogger = CSVLogger(
        os.path.join(
            model_path,
            "csv_train_" + str(datetime.datetime.now().strftime('%H:%M')) + ".csv"))
    modelfile = os.path.join(model_path, 'weights_{epoch:02d}_{loss:.2f}.hdf5')
    checkpoint = EvalCallBack(model_path, self.inres, self.outres)
    lr_reducer = ReduceLROnPlateau(monitor='loss', factor=0.8, patience=3,
                                   verbose=1, cooldown=2, mode='auto')
    xcallbacks = [csvlogger, checkpoint, lr_reducer]

    self.model.fit_generator(
        generator=train_gen,
        # x4: four passes over the dataset per reported epoch
        steps_per_epoch=(train_dataset.get_dataset_size() // batch_size) * 4,
        epochs=epochs,
        callbacks=xcallbacks)

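# ReduceLROnPlateau, as configured above, multiplies the learning rate by 0.8
# once the monitored training loss fails to improve for 3 consecutive epochs,
# then waits 2 cooldown epochs before counting again. A self-contained toy
# sketch (tf.keras assumed; the data is random, so the trigger epoch varies
# from run to run):
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.Input(shape=(4,)),
                             tf.keras.layers.Dense(1)])
model.compile(optimizer='adam', loss='mse')
reducer = tf.keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.8,
                                               patience=3, verbose=1,
                                               cooldown=2, mode='auto')
model.fit(np.random.rand(64, 4), np.random.rand(64, 1),
          epochs=15, callbacks=[reducer], verbose=0)
print(float(model.optimizer.learning_rate.numpy()))  # final LR after reductions
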
def resume_train(self, batch_size, model_json, model_weights, init_epoch,
                 epochs, out_name, mini_data=True):
    self.load_model(model_json, model_weights)
    self.model.compile(optimizer=Adam(lr=5e-4), loss='categorical_crossentropy',
                       metrics=["categorical_accuracy"])

    train_dataset = DataGen(self.IMG_SIZE, 5, is_train=True, mini=mini_data)
    train_gen = train_dataset.generator(batch_size, True)

    model_dir = os.path.dirname(os.path.abspath(model_json))
    print(model_dir, model_json)

    TIMESTAMP = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.datetime.now())
    callbacks_list = [
        EvalCallBack(out_name),
        EarlyStopping(monitor='val_loss', mode='min', patience=6),
        TensorBoard(log_dir='logs/' + TIMESTAMP, batch_size=batch_size,
                    update_freq='epoch')
    ]

    self.model.fit_generator(
        generator=train_gen,
        steps_per_epoch=train_dataset.get_dataset_size() // batch_size,
        initial_epoch=init_epoch,
        epochs=epochs,
        callbacks=callbacks_list)

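# All of the variants above call fit_generator, which TF2's Keras deprecated;
# model.fit accepts Python generators directly and takes the same
# steps_per_epoch / initial_epoch arguments. A minimal self-contained sketch
# of the migration (toy model and generator, shapes illustrative only):
import numpy as np
import tensorflow as tf

def toy_gen(batch_size=4):
    # Infinite generator yielding (inputs, targets) batches.
    while True:
        yield (np.random.rand(batch_size, 8).astype('float32'),
               np.random.rand(batch_size, 1).astype('float32'))

model = tf.keras.Sequential([tf.keras.Input(shape=(8,)),
                             tf.keras.layers.Dense(1)])
model.compile(optimizer='adam', loss='mse')
# old: model.fit_generator(generator=toy_gen(), steps_per_epoch=10, epochs=2)
model.fit(toy_gen(), steps_per_epoch=10, epochs=2)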