file_ptr = open(mapping_file, 'r')
actions = file_ptr.read().split('\n')[:-1]
file_ptr.close()
actions_dict = dict()
for a in actions:
    actions_dict[a.split()[1]] = int(a.split()[0])
num_classes = len(actions_dict)

trainer = Trainer(num_stages, num_layers, num_f_maps, features_dim, num_classes,
                  pooling_type=pooling_type, dropout=dropout)

if args.action == "train":
    batch_gen = BatchGenerator(num_classes, actions_dict, gt_path, features_path, sample_rate)
    batch_gen.read_data(vid_list_file)
    trainer.train(model_dir, batch_gen, num_epochs=num_epochs, batch_size=bz,
                  learning_rate=lr, device=device)

if args.action == "predict":
    trainer.predict(model_dir, results_dir, features_path, vid_list_file_tst,
                    num_epochs, actions_dict, device, sample_rate)
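# A tiny hedged sketch of the mapping-file format the parsing above expects:
# one "<id> <class_name>" pair per line. The concrete class names below are
# made up for illustration; they are not from the original dataset.
example_mapping = "0 background\n1 take_cup\n2 pour_coffee\n"
example_dict = {line.split()[1]: int(line.split()[0])
                for line in example_mapping.strip().split('\n')}
# example_dict == {'background': 0, 'take_cup': 1, 'pour_coffee': 2}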
def main(args, device, model_load_dir, model_save_dir, results_save_dir):
    if args.action == 'train' and args.extract_save_pseudo_labels == 0:
        # load train dataset and test dataset
        print(f'Load train data: {args.train_data}')
        train_loader = DataLoader(args, args.train_data, 'train')
        print(f'Load test data: {args.test_data}')
        test_loader = DataLoader(args, args.test_data, 'test')

        print(f'Start training.')
        trainer = Trainer(
            args.num_stages,
            args.num_layers,
            args.num_f_maps,
            args.features_dim,
            train_loader.num_classes,
            device,
            train_loader.weights,
            model_save_dir
        )

        eval_args = [
            args,
            model_save_dir,
            results_save_dir,
            test_loader.features_dict,
            test_loader.gt_dict,
            test_loader.eval_gt_dict,
            test_loader.vid_list,
            args.num_epochs,
            device,
            'eval',
            args.classification_threshold,
        ]

        batch_gen = BatchGenerator(
            train_loader.num_classes,
            train_loader.gt_dict,
            train_loader.features_dict,
            train_loader.eval_gt_dict
        )
        batch_gen.read_data(train_loader.vid_list)
        trainer.train(
            model_save_dir,
            batch_gen,
            args.num_epochs,
            args.bz,
            args.lr,
            device,
            eval_args,
            pretrained=model_load_dir)

    elif args.extract_save_pseudo_labels and args.pseudo_label_type != 'PL':
        # extract/generate pseudo labels and save in "data/pseudo_labels"
        print(f'Load test data: {args.test_data}')
        test_loader = DataLoader(args, args.test_data, args.extract_set,
                                 results_dir=results_save_dir)

        print(f'Extract {args.pseudo_label_type}')
        if args.pseudo_label_type == 'local':
            get_save_local_fusion(args, test_loader.features_dict, test_loader.gt_dict)
        elif args.pseudo_label_type == 'merge':
            merge_PL_CP(args, test_loader.features_dict, test_loader.gt_dict)
        elif args.pseudo_label_type == 'CMPL':
            CMPL(args, test_loader.features_dict, test_loader.gt_dict)
        elif args.pseudo_label_type == 'CP':
            extract_CP(args, test_loader.features_dict)

        print('Self labelling process finished')

    else:
        print(f'Load test data: {args.test_data}')
        test_loader = DataLoader(args, args.test_data, args.extract_set,
                                 results_dir=results_save_dir)

        if args.extract_save_pseudo_labels and args.pseudo_label_type == 'PL':
            print(f'Extract {args.pseudo_label_type}')
            extract_save_PL = 1
        else:
            print(f'Start inference.')
            extract_save_PL = 0

        trainer = Trainer(
            args.num_stages,
            args.num_layers,
            args.num_f_maps,
            args.features_dim,
            test_loader.num_classes,
            device,
            test_loader.weights,
            results_save_dir)

        trainer.predict(
            args,
            model_load_dir,
            results_save_dir,
            test_loader.features_dict,
            test_loader.gt_dict,
            test_loader.eval_gt_dict,
            test_loader.vid_list,
            args.num_epochs,
            device,
            'test',
            args.classification_threshold,
            uniform=args.uniform,
            save_pslabels=extract_save_PL,
            CP_dict=test_loader.CP_dict,
        )
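# A minimal argparse sketch for the flags consumed by main() above. The
# attribute names are taken from the code; the types, defaults, and choices
# here are assumptions for illustration, not the original project's values.
import argparse

def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--action', default='train', choices=['train', 'predict'])
    parser.add_argument('--extract_save_pseudo_labels', type=int, default=0)
    parser.add_argument('--pseudo_label_type', default='PL',
                        choices=['PL', 'CP', 'local', 'merge', 'CMPL'])
    parser.add_argument('--extract_set', default='test')
    parser.add_argument('--train_data', default='train')
    parser.add_argument('--test_data', default='test')
    parser.add_argument('--num_stages', type=int, default=4)
    parser.add_argument('--num_layers', type=int, default=10)
    parser.add_argument('--num_f_maps', type=int, default=64)
    parser.add_argument('--features_dim', type=int, default=2048)
    parser.add_argument('--num_epochs', type=int, default=50)
    parser.add_argument('--bz', type=int, default=1)
    parser.add_argument('--lr', type=float, default=0.0005)
    parser.add_argument('--classification_threshold', type=float, default=0.5)
    parser.add_argument('--uniform', type=int, default=0)
    return parser.parse_args()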
file_ptr = open(mapping_file, 'r')
actions = file_ptr.read().split('\n')[:-1]  # list of classes
file_ptr.close()
actions_dict = dict()
for a in actions:
    actions_dict[a.split()[1]] = int(a.split()[0])
num_classes = len(actions_dict)

# initialize model & trainer
model = MultiStageModel(args, num_classes)
trainer = Trainer(num_classes)

# ====== Main Program ====== #
start_time = time.time()
if args.action == "train":
    batch_gen_source = BatchGenerator(num_classes, actions_dict, gt_path, features_path, sample_rate)
    batch_gen_target = BatchGenerator(num_classes, actions_dict, gt_path, features_path, sample_rate)
    batch_gen_source.read_data(vid_list_file)  # read & shuffle the source training list
    batch_gen_target.read_data(vid_list_file_target)  # read & shuffle the target training list
    trainer.train(model, model_dir, results_dir, batch_gen_source, batch_gen_target, device, args)

if args.action == "predict":
    predict(model, model_dir, results_dir, features_path, vid_list_file_test,
            args.num_epochs, actions_dict, device, sample_rate, args)

end_time = time.time()
valid_corpus = SoundCorpus(cfg.soundcorpus_dir, mode='valid',
                           fn='valid.p.soundcorpus.p')
len_valid = valid_corpus._get_len()
background_noise_corpus = SoundCorpus(cfg.soundcorpus_dir, mode='background',
                                      fn='background.p.soundcorpus.p')
unknown_corpus = SoundCorpus(cfg.soundcorpus_dir, mode='unknown',
                             fn='unknown.p.soundcorpus.p')
silence_corpus = SoundCorpus(cfg.soundcorpus_dir, mode='silence',
                             fn='silence.p.soundcorpus.p')
batch_parameters = BatchParams()
advanced_gen = BatchGenerator(batch_parameters, train_corpus, background_noise_corpus,
                              unknown_corpus, SilenceCorpus=None)
encoder = train_corpus.encoder
decoder = train_corpus.decoder
num_classes = len(decoder) - 1

# set_graph Graph
batch_size = batch_parameters.batch_size
is_training = cfg.is_training
max_gradient = cfg.max_gradient
training_iters = train_corpus.len
file_ptr = open(mapping_file, 'r')
actions = file_ptr.read().split('\n')[:-1]
file_ptr.close()
actions_dict = dict()
for a in actions:
    actions_dict[a.split()[1]] = int(a.split()[0])
num_classes = len(actions_dict)

# train
trainer = Trainer(num_stages, num_layers, num_f_maps, features_dim, num_classes)
no_change = 1
if args.action == "train":
    batch_gen = BatchGenerator(num_classes, actions_dict, segmentation_path, features_path, sample_rate)
    batch_gen.read_data(vid_list_file)
    weights = batch_gen.set_class_weights()
    trainer.ce(weight=weights)
    while (no_change):
        trainer.train(model_dir, batch_gen, num_epochs=num_epochs, batch_size=bz,
                      learning_rate=lr, device=device)
        trainer.predict(model_dir, temp_results_dir, features_path, vid_list_file,
                        num_epochs, actions_dict, device, sample_rate)
        utils.generate_target(segmentation_path, temp_results_dir, vid_list_file)
def main():
    config = get_arg()
    config.save_folder = os.path.join(config.save_folder, config.model)
    if not os.path.exists(config.save_folder):
        os.makedirs(config.save_folder)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if device.type == "cuda":  # compare the device type, not the device object, to the string
        torch.set_default_tensor_type("torch.cuda.FloatTensor")
    else:
        torch.set_default_tensor_type("torch.FloatTensor")

    # Traindataset = featDataset(mode="train", feat_model=config.model)
    num_classes = 11
    actions_dict = {
        "opening": 0,
        "moving": 1,
        "hidden": 2,
        "painting": 3,
        "battle": 4,
        "respawn": 5,
        "superjump": 6,
        "object": 7,
        "special": 8,
        "map": 9,
        "ending": 10,
    }
    actions_dict = utils.label_to_id
    gt_path = "../../../data/training/feature_ext/vgg"
    features_path = "../../../data/training/feature_ext/vgg"
    Traindataset = BatchGenerator(num_classes, actions_dict, gt_path, features_path)
    Traindataset.read_data()
    Testdataset = BatchGenerator(num_classes, actions_dict, gt_path, features_path)
    Testdataset.read_data(mode="test")

    num_stages = 2
    num_layers = 2
    num_f_maps = 8
    features_dim = 4
    num_f_maps = 64
    features_dim = 512 * 8 * 8
    # num_f_maps = 512 * 8 * 8
    # features_dim = 2048
    model = LSTMclassifier(1, 1, 256)
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=config.lr)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)
    criterion = nn.CrossEntropyLoss()
    best_eval = 0
    for epoch in range(1, 1 + config.epochs):
        print("epoch:", epoch)
        t0 = time.time()
        train(
            model=model,
            optimizer=optimizer,
            criterion=criterion,
            dataset=Traindataset,
            config=config,
            device=device,
            # dataset_perm=dataset_perm,
        )
        t1 = time.time()
        scheduler.step()
        print(f"\nlr: {scheduler.get_last_lr()}")
        t1 = time.time()
        print(f"\ntraining time :{round(t1 - t0)} sec")
        best_eval = test(
            model=model,
            dataset=Testdataset,
            config=config,
            device=device,
            best_eval=best_eval,
        )
class Model:
    def __init__(self):
        self.cfg = Config()
        self.h_params = Hparams()
        self.batch_params = BatchParams()
        self.display_params = DisplayParams()
        if not os.path.exists(self.cfg.logs_path):
            os.makedirs(self.cfg.logs_path)
        self.write_config()
        self.graph = tf.Graph()
        # self.tf_seed = tf.set_random_seed(self.h_params.tf_seed) deprecated
        self.batch_shape = (None,) + self.batch_params.dims_input_transformation
        self.baseline = Baseline(self.h_params)
        self.infos = self._load_infos()
        self.train_corpus = SoundCorpus(self.cfg.soundcorpus_dir, mode='train')
        self.valid_corpus = SoundCorpus(self.cfg.soundcorpus_dir, mode='valid',
                                        fn='valid.pf.soundcorpus.p')
        self.len_valid = self.valid_corpus._get_len()
        self.noise_corpus = SoundCorpus(self.cfg.soundcorpus_dir, mode='background',
                                        fn='background.pf.soundcorpus.p')
        self.unknown_corpus = SoundCorpus(self.cfg.soundcorpus_dir, mode='unknown',
                                          fn='unknown.pf.soundcorpus.p')
        self.test_corpus = SoundCorpus(self.cfg.soundcorpus_dir, mode='own_test',
                                       fn='own_test_fname.p.soundcorpus.p')
        self.fname2label = self._load_fname2label()
        len_test = self.test_corpus._get_len()
        test_gen = self.test_corpus.batch_gen(
            len_test,
            input_transformation='filterbank',
            dims_input_transformation=self.batch_params.dims_input_transformation)
        self.test_batch_x, test_batch_y = next(test_gen)
        self.test_batch_y = [self.fname2label[b] for b in test_batch_y]
        self.advanced_gen = BatchGenerator(self.batch_params, self.train_corpus,
                                           self.noise_corpus, self.unknown_corpus)
        if self.advanced_gen.train_silence_detection:
            self.test_batch_y = [1 if i == 11 else 0 for i in self.test_batch_y]
        if self.cfg.preprocessed:
            self.advanced_gen = self.corpus_gen('test.p')
        self.encoder = self.infos['name2id']
        self.decoder = self.infos['id2name']
        if self.batch_params.portion_silence == 0:
            self.num_classes = len(self.decoder) - 1  # 11
        else:
            self.num_classes = len(self.decoder)
        if self.advanced_gen.train_silence_detection:
            self.num_classes = 2
        self.training_iters = self.train_corpus.len
        self.result = None

    def _load_infos(self):
        with open(self.cfg.soundcorpus_dir + 'infos.p', 'rb') as f:
            infos = pickle.load(f)
        return infos

    def _load_fname2label(self):
        with open(self.cfg.soundcorpus_dir + 'fname2label.p', 'rb') as f:
            fname2label = pickle.load(f)
        return fname2label

    def save(self, sess, epoch):
        print('saving model...', end='')
        model_name = 'model_%s_bsize%s_e%s.ckpt' % ('mfcc', self.batch_params.batch_size, epoch)
        s_path = self.saver.save(sess, self.cfg.logs_path + model_name)
        print("Model saved in file: %s" % s_path)

    @staticmethod
    def class2list(class_):
        class_list = [[item, class_.__dict__[item]]
                      for item in sorted(class_.__dict__)
                      if not item.startswith('__')]
        return class_list

    def get_config(self):
        config_list = []
        for line in self.class2list(Config):
            config_list.append(line)
        for line in self.class2list(Hparams):
            config_list.append(line)
        for line in self.class2list(DisplayParams):
            config_list.append(line)
        for line in self.class2list(BatchParams):
            config_list.append(line)
        return config_list

    def add_experiment_to_csv(self):
        with open('model_runs.csv', 'a') as csvfile:
            writer = csv.writer(csvfile, delimiter=';', quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
            config_list = self.get_config()
            result_list = self.result
            # writer.writerow([c[0] for c in config_list])
            writer.writerow([c[1] for c in config_list] + [r[1] for r in result_list])

    def preprocess(self, fn='preprocessed_batch_corpus.p'):
        batch_gen = self.advanced_gen.batch_gen()
        with open(fn, 'wb') as f:
            pickler = pickle.Pickler(f)
            tic = time.time()
            for epoch in range(self.h_params.epochs):
                toc = time.time()
                logging.info('epoch %s - time needed %s' % (epoch, toc - tic))
                step = 1
                # Keep training until reach max iterations
                tic = time.time()
                while step * self.batch_params.batch_size < self.training_iters:
                    # for (batch_x,batch_y) in batch_gen:
                    batch_x, batch_y = next(batch_gen)
                    pickler.dump((batch_x, batch_y))
                    step += 1

    def write_result_to_csv(self, row):
        with open(self.cfg.logs_path + 'results.csv', 'a') as csvfile:
            writer = csv.writer(csvfile, delimiter=';', quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
            # writer.writerow([c[0] for c in config_list])
            writer.writerow(row)

    class corpus_gen:
        def __init__(self, fn):
            self.fn = fn

        def gen_corpus(self):
            with open(self.fn, 'rb') as f:
                unpickler = pickle.Unpickler(f)
                while True:
                    data = unpickler.load()
                    yield data

        def batch_gen(self):
            gen = self.gen_corpus()
            while True:
                try:
                    batch = next(gen)
                except EOFError:
                    print('restarting gen')
                    gen = self.gen_corpus()
                    batch = next(gen)
                yield batch

    def write_config(self):
        with open(os.path.join(self.cfg.logs_path, 'config.txt'), 'w') as f:
            f.write('Baseline = {}\n'.format(Baseline.__name__))
            f.write('\n')
            f.write('Config\n')
            for line in self.class2list(Config):
                f.write('{} = {}\n'.format(line[0], line[1]))
            f.write('\n')
            f.write('HParams\n')
            for line in self.class2list(Hparams):
                f.write('{} = {}\n'.format(line[0], line[1]))
            f.write('\n')
            f.write('DisplayParams\n')
            for line in self.class2list(DisplayParams):
                f.write('{} = {}\n'.format(line[0], line[1]))
            f.write('\n')
            f.write('BatchParams\n')
            for line in self.class2list(BatchParams):
                f.write('{} = {}\n'.format(line[0], line[1]))

    def restore(self, sess, fn_model):
        self.saver.restore(sess, fn_model)
        print("Model restored.")

    def predict(self, batch_x_iter, fn_model):
        with tf.Session(graph=self.graph) as sess:
            self.restore(sess, fn_model)
            predictions = []
            k_batch = 0
            try:
                for batch_x in batch_x_iter:
                    if k_batch % 100 == 0:
                        logging.info(str(k_batch))
                    prediction = sess.run([self.pred],
                                          feed_dict={self.x: batch_x,
                                                     self.keep_prob: 1.0})
                    print(prediction)
                    for k, p in enumerate(prediction[0]):
                        predictions.append([batch_x[k], self.decoder[p]])
                    k_batch += 1
            except EOFError:
                pass
        return predictions

    def train(self):
        with tf.Session(graph=self.graph, ) as sess:
            logging.info('Start training')
            self.init = tf.global_variables_initializer()
            train_writer = tf.summary.FileWriter(self.cfg.logs_path + 'train/', graph=self.graph)
            valid_writer = tf.summary.FileWriter(self.cfg.logs_path + 'valid/')
            sess.run(self.init)
            global_step = 0
            batch_gen = self.advanced_gen.batch_gen()
            for epoch in range(1, self.h_params.epochs):
                step = 1
                # Keep training until reach max iterations
                current_time = time.time()
                while step * self.batch_params.batch_size < self.training_iters:
                    # for (batch_x,batch_y) in batch_gen:
                    batch_x, batch_y = next(batch_gen)
                    # logging.info('epoch ' + str(epoch) + ' - step ' + str(step))
                    # batch_x, batch_y = next(gen.batch_gen())
                    # Run optimization op (backprop)
                    summary_, _ = sess.run(
                        [self.summaries, self.optimizer],
                        feed_dict={self.x: batch_x,
                                   self.y: batch_y,
                                   self.keep_prob: self.h_params.keep_prob})
                    train_writer.add_summary(summary_, global_step)
                    if step % self.display_params.print_step == 0:
                        # Calculate batch accuracy
                        logging.info('epoch %s - step %s' % (epoch, step))
                        logging.info('runtime for batch of ' +
                                     str(self.batch_params.batch_size *
                                         self.display_params.print_step) +
                                     ' ' + str(time.time() - current_time))
                        current_time = time.time()
                        c, acc, cm = sess.run(
                            [self.cost, self.accuracy, self.confusion_matrix],
                            feed_dict={self.x: batch_x,
                                       self.y: batch_y,
                                       self.keep_prob: self.h_params.keep_prob})
                        print(c, acc)
                        for k in range(self.num_classes):
                            print(str(self.decoder[k]) + ' ' + str(cm[k, k] / sum(cm[k, :])))
                        if self.display_params.print_confusion_matrix:
                            print(cm)
                        print(' ')
                        # c_test, acc_test, cm_test = sess.run([self.cost, self.accuracy, self.confusion_matrix],
                        #                                      feed_dict={self.x: self.test_batch_x,
                        #                                                 self.y: self.test_batch_y,
                        #                                                 self.keep_prob: 1})
                        # print(' ')
                        # print("test:", c_test, acc_test)
                        # print(cm_test)
                        # model_name = 'model_%s_bsize%s_e%s_s%s.ckpt' % ('mfcc', self.batch_params.batch_size, epoch, step)
                        # s_path = self.saver.save(sess, self.cfg.logs_path + model_name)
                        # print('saving under ' + s_path)
                    step += 1
                    global_step += 1
                # if epoch % cfg.epochs_per_save == 0:
                self.save(sess, epoch)
                val_batch_gen = self.valid_corpus.batch_gen(
                    self.batch_params.batch_size,
                    input_transformation='filterbank',
                    dims_input_transformation=self.batch_params.dims_input_transformation)
                val_batch_x, val_batch_y = next(val_batch_gen)
                summary_val, c_val, acc_val, cm_val = sess.run(
                    [self.summaries, self.cost, self.accuracy, self.confusion_matrix],
                    feed_dict={self.x: val_batch_x,
                               self.y: val_batch_y,
                               self.keep_prob: 1})
                valid_writer.add_summary(summary_val, global_step)
                print("validation:", c_val, acc_val)
                print(cm_val)
                for k in range(11):
                    print(str(self.decoder[k]) + ' ' + str(cm_val[k, k] / sum(cm_val[k, :])))
                c_test, acc_test, cm_test = sess.run(
                    [self.cost, self.accuracy, self.confusion_matrix],
                    feed_dict={self.x: self.test_batch_x,
                               self.y: self.test_batch_y,
                               self.keep_prob: 1})
                print(' ')
                print("test:", c_test, acc_test)
                print(cm_test)
                for k in range(12):
                    print(str(self.decoder[k]) + ' ' + str(cm_test[k, k] / sum(cm_test[k, :])))
                row = [acc_test] + [cm_test[k, k] / sum(cm_test[k, :]) for k in range(12)]
                self.write_result_to_csv(row)
                print(' ')
                # print(self.lr)
                if epoch % self.batch_params.unknown_change_epochs == 0:
                    self.advanced_gen.portion_unknown = \
                        self.advanced_gen.portion_unknown * self.batch_params.unknown_change_rate
            print("Optimization Finished!")
            # self.result = [['train_acc', acc], ['val_acc', acc_val]]
        pass

    # def debug(self):
    #     with tf.Session(graph=graph) as sess:
    #         init = tf.global_variables_initializer()
    #         sess.run(init)
    #         batch_gen = advanced_gen.batch_gen()
    #         batch_x, batch_y = next(batch_gen)
    #         l, kw = sess.run([logits, krw], feed_dict={x: batch_x, y: batch_y, keep_prob: cfg.keep_prob})
    #         return l, kw
    #     pass

    def set_graph(self):
        logging.info('Setting Graph Variables')
        with self.graph.as_default():
            # tf Graph input
            tf.set_random_seed(self.h_params.tf_seed)
            with tf.name_scope("Input"):
                self.x = tf.placeholder(tf.float32, shape=self.batch_shape, name="input")
                self.y = tf.placeholder(tf.int64, shape=(None, ), name="input")
                self.keep_prob = tf.placeholder(tf.float32, name="dropout")
                class_weights = tf.constant(self.h_params.class_weights)
                weights = tf.gather(class_weights, self.y)
            with tf.variable_scope('logit'):
                self.logits = self.baseline.calc_logits(self.x, self.keep_prob, self.num_classes)
            with tf.variable_scope('costs'):
                self.xent = tf.losses.sparse_softmax_cross_entropy(
                    labels=self.y, logits=self.logits, weights=weights)
                self.cost = tf.reduce_mean(self.xent, name='xent')
                if self.h_params.reg_constant is not None:
                    self.cost = self.cost + tf.add_n(
                        tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
                tf.summary.scalar('cost', self.cost)
            with tf.variable_scope('acc'):
                self.pred = tf.argmax(self.logits, 1)
                self.correct_prediction = tf.equal(self.pred, tf.reshape(self.y, [-1]))
                self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32),
                                               name='accu')
                self.confusion_matrix = tf.confusion_matrix(
                    tf.reshape(self.y, [-1]), self.pred, self.num_classes)
                tf.summary.scalar('accuracy', self.accuracy)
            with tf.variable_scope('acc_per_class'):
                for i in range(self.num_classes):
                    acc_id = self.confusion_matrix[i, i] / tf.reduce_sum(self.confusion_matrix[i, :])
                    tf.summary.scalar(self.decoder[i], acc_id)

            # train ops
            self.gradients = tf.gradients(self.cost, tf.trainable_variables())
            tf.summary.scalar('grad_norm', tf.global_norm(self.gradients))
            # gradients, _ = tf.clip_by_global_norm(raw_gradients, max_gradient, name="clip_gradients")
            # gradnorm_clipped = tf.global_norm(gradients)
            # tf.summary.scalar('grad_norm_clipped', gradnorm_clipped)
            self.iteration = tf.Variable(0, dtype=tf.int64, name="iteration", trainable=False)
            self.lr_ = tf.Variable(self.h_params.learning_rate, dtype=tf.float64,
                                   name="lr_", trainable=False)
            decay = tf.Variable(self.h_params.lr_decay_rate, dtype=tf.float64,
                                name="decay", trainable=False)
            steps_ = tf.Variable(self.h_params.lr_change_steps, dtype=tf.int64,
                                 name="setps_", trainable=False)
            self.lr = tf.train.exponential_decay(self.lr_, self.iteration, steps_, decay,
                                                 staircase=True)
            tf.summary.scalar('learning_rate', self.lr)
            self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr).apply_gradients(
                zip(self.gradients, tf.trainable_variables()),
                name="train_step",
                global_step=self.iteration)
            self.saver = tf.train.Saver(max_to_keep=self.cfg.max_ckpt_to_keep)
            self.summaries = tf.summary.merge_all()
        logging.info('Done')
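# A minimal driver sketch for the Model class above. The method names
# (set_graph, train, predict) come from the class; everything else here is an
# assumption for illustration, not the original project's entry point.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    model = Model()      # loads corpora, builds the BatchGenerator, dumps the config
    model.set_graph()    # defines placeholders, loss, optimizer and summaries
    model.train()        # runs the epoch/step loop and saves checkpoints
    # Optional inference with a saved checkpoint (checkpoint path is hypothetical):
    # predictions = model.predict(batch_x_iter, fn_model=model.cfg.logs_path + 'model_mfcc.ckpt')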
    for l in zip(model.metrics_names, logs):
        named_logs[prefix + l[0]] = l[1]
    return named_logs


checkpoint_path = "saved_models/ResNet_sp/checkpoints"
os.makedirs(checkpoint_path, exist_ok=True)
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_path)
model.load_weights(latest_checkpoint)

h5 = tables.open_file("database.h5", 'r')
train_table = h5.get_node("/train/specs")
val_table = h5.get_node("/val/specs")
test_table = h5.get_node("/test/specs")

train_generator = BatchGenerator(train_table, 128, transform_batch, y_field="sp",
                                 shuffle=True, refresh_on_epoch_end=True)
val_generator = BatchGenerator(val_table, 128, transform_batch, y_field="sp",
                               shuffle=True, refresh_on_epoch_end=True)
test_generator = BatchGenerator(test_table, 128, transform_batch, y_field="sp",
                                shuffle=True, refresh_on_epoch_end=True)

metrics_names = model.metrics_names
for epoch in range(20):
    # Reset the metric accumulators
    model.reset_metrics()
    for train_batch_id in range(train_generator.n_batches):
        train_X, train_Y = next(train_generator)
        train_result = model.train_on_batch(train_X, train_Y)
def main():
    config = get_arg()
    config.save_folder = os.path.join(config.save_folder, config.model)
    if not os.path.exists(config.save_folder):
        os.makedirs(config.save_folder)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if device.type == "cuda":  # compare the device type, not the device object, to the string
        torch.set_default_tensor_type("torch.cuda.FloatTensor")
    else:
        torch.set_default_tensor_type("torch.FloatTensor")

    # Traindataset = featDataset(mode="train", feat_model=config.model)
    num_classes = 11
    actions_dict = {
        "opening": 0,
        "moving": 1,
        "hidden": 2,
        "painting": 3,
        "battle": 4,
        "respawn": 5,
        "superjump": 6,
        "object": 7,
        "special": 8,
        "map": 9,
        "ending": 10,
    }
    gt_path = "../../../data/training/feature_ext/vgg"
    features_path = "../../../data/training/feature_ext/vgg"
    Traindataset = BatchGenerator(num_classes, actions_dict, gt_path, features_path)
    Traindataset.read_data()
    Testdataset = BatchGenerator(num_classes, actions_dict, gt_path, features_path)
    Testdataset.read_data(mode="test")
    # while Traindataset.has_next():
    #     batch_input, batch_target = Traindataset.next_batch(config.batch_size)
    # Testdataset = featDataset(mode="test", feat_model=config.model)
    # model = featModel(input_channel=1280)

    num_stages = 2
    num_layers = 2
    num_f_maps = 8
    features_dim = 4
    num_f_maps = 64
    features_dim = 512 * 8 * 8
    # num_f_maps = 512 * 8 * 8
    # features_dim = 2048
    # model = MultiStageModel(
    #     num_stages, num_layers, num_f_maps, features_dim, num_classes
    # )
    model = TCN(features_dim, 11, [20])
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=config.lr)
    criterion = nn.CrossEntropyLoss()
    best_eval = 0
    for epoch in range(1, 1 + config.epochs):
        print("epoch:", epoch)
        t0 = time.time()
        train(
            model=model,
            optimizer=optimizer,
            criterion=criterion,
            dataset=Traindataset,
            config=config,
            device=device,
            # dataset_perm=dataset_perm,
        )
        t1 = time.time()
        print("\ntraining time :", round(t1 - t0))
        best_eval = test(
            model=model,
            dataset=Testdataset,
            config=config,
            device=device,
            best_eval=best_eval,
        )