for i in range(number_of_batches):
    op_start_time = time.time()
    batch_x, batch_y = data.train.next_batch(batch_size)
    model.train_on_batch(
        batch_x.reshape(-1, img_height, img_width, channels), batch_y)

    # Log (log :)) loss, current position and times
    op_time, h, m, s = get_times(op_start_time, start_time)
    #if (i + 1) % 50 == 0:
    loss, acc = model.evaluate(
        batch_x.reshape(-1, img_height, img_width, channels), batch_y,
        batch_size)
    lr = get_lr(i + 1)
    logger.color_print(
        logger.Bold,
        'epoch {} | batch {}/{} | loss: {:.2f} | acc: {} | lr: {:.4E} | {:.2f}s | {:02d}:{:02d}:{:02d}'
        .format(epoch + 1, i + 1, number_of_batches, loss, acc, lr, op_time,
                h, m, s))
    #else:
    #    print('epoch {} | batch {}/{} | {:.2f}s | {:02d}:{:02d}:{:02d}'
    #          .format(epoch + 1, i + 1, number_of_batches, op_time, h, m, s))

# Approximate train log score with ~1/4 dataset size for efficiency
evaluate(data.train, data.train.get_number_of_batches(batch_size))
evaluate(data.valid, data.valid.get_number_of_batches(batch_size))
score_data['lr'] += [get_lr(data.train.get_number_of_batches(batch_size))]
#score_data['f1_score'] += [f1_score(...)]
#scores_path = scores_template.format(epoch, batch_size, opt_name, lr_starting, lr_decay)
#print('Saving scores (train/valid loss, lr and f1-score) to {}'.format(scores_path))
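# get_times and get_lr are helpers assumed by the loop above (not defined in
# this snippet). A minimal sketch of what they could look like, inferred from
# the call sites; the exponential-decay form of get_lr is an assumption based
# on the lr_starting / lr_decay variables used elsewhere in this script:

import time


def get_times(op_start_time, start_time):
    """Duration of the last op, plus overall elapsed time split into h/m/s."""
    now = time.time()
    op_time = now - op_start_time
    elapsed = int(now - start_time)
    h, rem = divmod(elapsed, 3600)
    m, s = divmod(rem, 60)
    return op_time, h, m, s


def get_lr(step):
    """Per-batch exponentially decayed learning rate (assumed schedule)."""
    return lr_starting * lr_decay**step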
for i in range(number_of_batches):
    op_start_time = time.time()
    batch_x, batch_y = data.train.next_batch(batch_size)
    model.train_on_batch(
        batch_x.reshape(-1, img_height, img_width, channels), batch_y)

    # Log (log :)) loss, current position and times
    op_time, overall_h, overall_m, overall_s = get_times(
        op_start_time, start_time)
    if (i + 1) % 50 == 0:
        loss = model.evaluate(
            batch_x.reshape(-1, img_height, img_width, channels), batch_y,
            batch_size)
        logger.color_print(
            logger.Info,
            'epoch {0} | batch {1} / {2} | loss: {3:.2f} | {4:.2f}s | {5:02d}:{6:02d}:{7:02d}'
            .format(epoch + 1, i + 1, number_of_batches, loss, op_time,
                    overall_h, overall_m, overall_s))
    else:
        print(
            'epoch {0} | batch {1} / {2} | {3:.2f}s | {4:02d}:{5:02d}:{6:02d}'
            .format(epoch + 1, i + 1, number_of_batches, op_time, overall_h,
                    overall_m, overall_s))

# Approximate train log score with ~1/4 dataset size for efficiency
#log_score(data.train, iter_limit=data.train.get_number_of_batches(batch_size) // 4)
#log_score(data.valid)
current_lr = lr_starting * (
    lr_decay**data.train.get_number_of_batches(batch_size))**(epoch + 1)
logger.color_print(logger.Info,
                   'Current learning rate: {}'.format(current_lr))
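# The current_lr expression above is per-batch exponential decay accumulated
# over every batch seen so far: with B batches per epoch,
#   lr_starting * (lr_decay ** B) ** (epoch + 1)
#     == lr_starting * lr_decay ** (B * (epoch + 1))
# A quick sanity check with made-up numbers (lr0, decay and B below are
# illustrative, not values from the actual training run):

lr0, decay, B = 1e-3, 0.9999, 500
for e in range(3):
    print('after epoch {}: lr = {:.4E}'.format(e + 1, lr0 * (decay**B)**(e + 1)))
# prints roughly 9.5E-04, 9.0E-04, 8.6E-04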