# DONOTCHANGE (You can decide how often you want to save the model) nsml.save(epoch) def on_batch_end(batch, logs): if batch % 50 == 1 or batch == batches_per_epoch: print("Batch: {}/{}, Loss: {}".format(batch, batches_per_epoch, logs['loss'])) callback = LambdaCallback(on_epoch_end=on_epoch_end, on_batch_end=on_batch_end) model.fit( x=[dataset.input1, dataset.input2], y=dataset.labels, batch_size=config.batch, epochs=config.epochs, verbose=2, callbacks=[callback], validation_split=0.05, shuffle=True, ) # 로컬 테스트 모드일때 사용합니다 # 결과가 아래와 같이 나온다면, nsml submit을 통해서 제출할 수 있습니다. # [(0.0, 9.045), (0.0, 5.91), ... ] elif config.mode == 'test_local': with open(os.path.join(DATASET_PATH, 'train/train_data'), 'rt', encoding='utf-8') as f: reviews = f.readlines() res = nsml.infer(reviews) print(res)
# NOTE(review): this fragment begins mid-statement — the first line below is the
# trailing `feed_dict=` argument of a `sess.run(...)` call whose opening (and
# the assignment of `loss`) is outside this view; indentation is reconstructed.
                                feed_dict={x: data, y_: labels, dropout_rate: config.dropout})
            # Per-minibatch progress; `loss` is the BCE value returned by the run above.
            print('Batch : ', i + 1, '/', one_batch_size,
                  ', BCE in this minibatch: ', float(loss))
            avg_loss += float(loss)
        # End-of-epoch summary: mean train loss plus accuracy on the held-out
        # split (dropout disabled for evaluation via dropout_rate=1.0).
        print('epoch:', epoch, ' train_loss:', float(avg_loss/one_batch_size),
              ' accuracy:', calculate_accuracy(
                  sess.run(output_sigmoid,
                           feed_dict={x: test_data, y_: test_label, dropout_rate: 1.0}),
                  test_label))
        # Report metrics to the NSML leaderboard/dashboard for this epoch.
        nsml.report(summary=True, scope=locals(), epoch=epoch, epoch_total=config.epochs,
                    train__loss=float(avg_loss/one_batch_size), step=epoch)
        # DONOTCHANGE (You can decide how often you want to save the model)
        nsml.save(epoch)

# Used in local test mode.
# If the result looks like the example below, you can submit via `nsml submit`.
# [(0.3, 0), (0.7, 1), ... ]
# NOTE(review): this `elif` continues a `config.mode` dispatch whose opening is
# outside this view.
elif config.mode == 'test_local':
    with open(os.path.join(DATASET_PATH, 'train/train_data'), 'rt', encoding='utf-8') as f:
        queries = f.readlines()
    res = nsml.infer(queries)
    print(res)
    with open(os.path.join(DATASET_PATH, 'train/train_label')) as f:
        labels = np.array([[np.float32(x)] for x in f.readlines()])
    # `res` appears to be a list of (score, prediction)-style pairs; column 1 is
    # taken as the prediction score — TODO confirm against nsml.infer's output.
    predictions = np.array(res)[:,1]
    # NOTE(review): this "accuracy" is the fraction of predictions above the
    # threshold and never compares against `labels` — looks like a bug in the
    # local sanity check; confirm the intended metric before relying on it.
    accuracy = np.array(predictions > config.threshold).sum()/float(len(labels))
    print('Accuracy: ', accuracy)
# NOTE(review): this fragment begins mid-statement — the first two lines below
# close a `feed_dict={...}` dict literal of a `sess.run(...)` call whose opening
# (and the assignment of `loss`) is outside this view; indentation is
# reconstructed.
                                       y_: labels
                                       })
            # Alternative single-call form kept by the original author for reference.
            # _, right_loss = sess.run([train_step, rmse], feed_dict={x: left_data, y_: labels})
            loss = float(loss)
            # Per-minibatch progress; `loss` is the RMSE value from the run above.
            print('Batch : ', i + 1, '/', one_batch_size, ', RMSE in this minibatch: ', loss)
            avg_loss += loss
        # End-of-epoch summary: mean train RMSE across minibatches.
        print('epoch:', epoch, ' train_loss:', avg_loss / one_batch_size)
        # Report metrics to the NSML leaderboard/dashboard for this epoch.
        nsml.report(summary=True, scope=locals(), epoch=epoch, epoch_total=config.epochs,
                    train__loss=avg_loss / one_batch_size, step=epoch)
        # DONOTCHANGE (You can decide how often you want to save the model)
        nsml.save(epoch)

# Used in local test mode.
# If the result looks like the example below, you can submit via `nsml submit`.
# [(0.3, 0), (0.7, 1), ... ]
# NOTE(review): this `elif` continues a `config.mode` dispatch whose opening is
# outside this view.
elif config.mode == 'test_local':
    with open(os.path.join(DATASET_PATH, 'train/train_data'), 'rt', encoding='utf-8') as f:
        queries = f.readlines()
    res = []
    # Run inference in minibatches to bound memory use, accumulating all results.
    for batch in _batch_loader(queries, config.batch):
        temp_res = nsml.infer(batch)
        res += temp_res
    print(res)
# NOTE(review): this fragment begins mid-statement — the first three lines below
# are the trailing keyword arguments of an `nsml.report(...)` call whose opening
# is outside this view; indentation is reconstructed.
                    epoch_total=config.epochs,
                    train__loss=float(avg_loss / one_batch_size), step=epoch,
                    accuracy=float(accuracy))
        # DONOTCHANGE (You can decide how often you want to save the model)
        nsml.save(epoch)
        # When running outside NSML without the dataset, also checkpoint locally.
        if not HAS_DATASET and not IS_ON_NSML:
            local_save(sess, epoch)

# Used in local test mode.
# If the result looks like the example below, you can submit via `nsml submit`.
# [(0.3, 0), (0.7, 1), ... ]
# NOTE(review): this `elif` continues a `config.mode` dispatch whose opening is
# outside this view.
elif config.mode == 'test_local':
    if not HAS_DATASET and not IS_ON_NSML:
        local_load(sess)
    with open(os.path.join(DATASET_PATH, 'train/train_data'), 'rt', encoding='utf-8') as f:
        queries = f.readlines()
    res = []
    for batch in _batch_loader(queries, config.batch):
        batch_size = len(batch)
        if USE_GPU and batch_size < config.batch:
            # Pad the final short batch with dummy tab-separated rows so the
            # GPU graph sees a fixed batch size, then drop the padded outputs.
            batch += [".\t."] * (config.batch - batch_size)
            temp_res = nsml.infer(batch)[:batch_size]
        else:
            temp_res = nsml.infer(batch)
        res += temp_res
    # NOTE(review): original whitespace is lost — if this print sits after the
    # loop it only shows the last batch's results; the sibling script prints the
    # accumulated `res` here. Confirm whether `temp_res` is intentional.
    print(temp_res)