x_test = np.array(x_test)

#*******************************************
# Define training params
#*******************************************
learning_rate = 0.00002
batch_size = 6
display_step = 1
epochs = 100
class_weights = [1, 1, 12]

generator = BalanceCovidDataset(data_dir=data_path,
                                csv_file=train_csv,
                                covid_percent=0.3,
                                class_weights=class_weights,
                                batch_size=batch_size)
total_batch = len(generator)

model = keras_model_build()
opt = tf.keras.optimizers.Adam(lr=learning_rate)

#*******************************************
# Train model
#*******************************************
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
hist = model.fit(generator,
                 epochs=epochs)
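# A minimal follow-up sketch: evaluating the fitted model on the held-out set.
# It assumes a matching one-hot label array y_test exists alongside x_test;
# y_test is an assumption here, not part of the original script.
y_test = np.array(y_test)
test_loss, test_acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test loss: {:.4f}, test accuracy: {:.4f}'.format(test_loss, test_acc))

# Per-sample predicted classes via argmax over the softmax outputs
preds = np.argmax(model.predict(x_test, batch_size=batch_size), axis=1)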
# For detection of no pneumonia/non-COVID-19 pneumonia/COVID-19 pneumonia
    mapping = {
        'normal': 0,
        'pneumonia': 1,
        'COVID-19': 2,
    }
    class_weights = [1., 1., args.covid_weight]
else:
    raise Exception('''COVID-Net currently only supports 2 class COVID-19 positive/negative detection
                       or 3 class detection of no pneumonia/non-COVID-19 pneumonia/COVID-19 pneumonia''')

generator = BalanceCovidDataset(data_dir=args.datadir,
                                csv_file=args.trainfile,
                                batch_size=batch_size,
                                input_shape=(args.input_size, args.input_size),
                                n_classes=args.n_classes,
                                mapping=mapping,
                                covid_percent=args.covid_percent,
                                class_weights=class_weights,
                                top_percent=args.top_percent)

with tf.Session() as sess:
    tf.get_default_graph()
    saver = tf.train.import_meta_graph(os.path.join(args.weightspath, args.metaname))
    graph = tf.get_default_graph()

    image_tensor = graph.get_tensor_by_name(args.in_tensorname)
    labels_tensor = graph.get_tensor_by_name(args.label_tensorname)
    sample_weights = graph.get_tensor_by_name(args.weights_tensorname)
    pred_tensor = graph.get_tensor_by_name(args.logit_tensorname)
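# For reference, a sketch of the 2-class branch that the exception above alludes
# to; the exact label names ('negative'/'positive') are assumptions, not taken
# from this fragment:
#
#     # For COVID-19 positive/negative detection
#     mapping = {
#         'negative': 0,
#         'positive': 1,
#     }
#     class_weights = [1., args.covid_weight]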
# output path
outputPath = './output/'
runID = args.name + '-lr' + str(learning_rate)
runPath = outputPath + runID
pathlib.Path(runPath).mkdir(parents=True, exist_ok=True)
print('Output: ' + runPath)

with open(args.trainfile) as f:
    trainfiles = f.readlines()
with open(args.testfile) as f:
    testfiles = f.readlines()

generator = BalanceCovidDataset(data_dir=args.datadir,
                                csv_file=args.trainfile,
                                batch_size=batch_size,
                                input_shape=(args.input_size, args.input_size),
                                covid_percent=args.covid_percent,
                                class_weights=[1., 1., args.covid_weight],
                                top_percent=args.top_percent)

with tf.Session() as sess:
    tf.get_default_graph()
    saver = tf.train.import_meta_graph(os.path.join(args.weightspath, args.metaname))
    graph = tf.get_default_graph()

    image_tensor = graph.get_tensor_by_name(args.in_tensorname)
    labels_tensor = graph.get_tensor_by_name(args.label_tensorname)
    sample_weights = graph.get_tensor_by_name(args.weights_tensorname)
    pred_tensor = graph.get_tensor_by_name(args.logit_tensorname)

    # loss expects unscaled logits since it performs a softmax on logits internally for efficiency
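    # A sketch of the continuation, mirroring the loss and optimizer defined in
    # the full train_tf function below; shown for completeness, not part of
    # this fragment:
    loss_op = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(
            logits=pred_tensor, labels=labels_tensor) * sample_weights)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    train_op = optimizer.minimize(loss_op)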
def train_tf(epochs, lr, bs, weightspath, metaname, ckptname, trainfile, testfile,
             name, datadir, covid_weight, covid_percent, input_size, top_percent,
             in_tensorname, out_tensorname, logit_tensorname, label_tensorname,
             weights_tensorname, input_data_dir, output_data_dir):
    # Parameters
    learning_rate = lr
    batch_size = bs
    display_step = 1

    # output path
    outputPath = output_data_dir
    runID = name + '-lr' + str(learning_rate)
    runPath = outputPath + runID
    pathlib.Path(runPath).mkdir(parents=True, exist_ok=True)
    print('Output: ' + runPath)

    with open(trainfile) as f:
        trainfiles = f.readlines()
    with open(testfile) as f:
        testfiles = f.readlines()

    generator = BalanceCovidDataset(data_dir=datadir,
                                    csv_file=trainfile,
                                    batch_size=batch_size,
                                    input_shape=(input_size, input_size),
                                    covid_percent=covid_percent,
                                    class_weights=[1., 1., covid_weight],
                                    top_percent=top_percent)

    with tf.Session() as sess:
        tf.get_default_graph()
        saver = tf.train.import_meta_graph(os.path.join(weightspath, metaname))
        graph = tf.get_default_graph()

        image_tensor = graph.get_tensor_by_name(in_tensorname)
        labels_tensor = graph.get_tensor_by_name(label_tensorname)
        sample_weights = graph.get_tensor_by_name(weights_tensorname)
        pred_tensor = graph.get_tensor_by_name(logit_tensorname)

        # loss expects unscaled logits since it performs a softmax on logits internally for efficiency
        # Define loss and optimizer
        loss_op = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(
                logits=pred_tensor, labels=labels_tensor) * sample_weights)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        train_op = optimizer.minimize(loss_op)

        # Initialize the variables
        init = tf.global_variables_initializer()

        # Run the initializer
        sess.run(init)

        # load weights
        saver.restore(sess, os.path.join(weightspath, ckptname))
        #saver.restore(sess, tf.train.latest_checkpoint(weightspath))

        # save base model
        saver.save(sess, os.path.join(runPath, 'model'))
        print('Saved baseline checkpoint')
        print('Baseline eval:')
        eval(sess, graph, testfiles, os.path.join(datadir, 'test'),
             in_tensorname, out_tensorname, input_size)

        # Training cycle
        print('Training started')
        total_batch = len(generator)
        progbar = tf.keras.utils.Progbar(total_batch)
        for epoch in range(epochs):
            for i in range(total_batch):
                # Run optimization
                batch_x, batch_y, weights = next(generator)
                sess.run(train_op, feed_dict={image_tensor: batch_x,
                                              labels_tensor: batch_y,
                                              sample_weights: weights})
                progbar.update(i + 1)

            if epoch % display_step == 0:
                pred = sess.run(pred_tensor, feed_dict={image_tensor: batch_x})
                loss = sess.run(loss_op, feed_dict={pred_tensor: pred,
                                                    labels_tensor: batch_y,
                                                    sample_weights: weights})
                print("Epoch:", '%04d' % (epoch + 1),
                      "Minibatch loss=", "{:.9f}".format(loss))
                eval(sess, graph, testfiles, os.path.join(datadir, 'test'),
                     in_tensorname, out_tensorname, input_size)
                saver.save(sess, os.path.join(runPath, 'model'),
                           global_step=epoch + 1, write_meta_graph=False)
                print('Saving checkpoint at epoch {}'.format(epoch + 1))

        print("Optimization Finished!")
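# A hypothetical invocation of train_tf; every value below is illustrative, and
# the tensor names follow the norm_dense_1 naming used elsewhere in these
# scripts rather than being taken from a confirmed configuration:
train_tf(epochs=10, lr=0.00002, bs=8,
         weightspath='models/COVIDNet-CXR-Large', metaname='model.meta',
         ckptname='model', trainfile='train_COVIDx.txt',
         testfile='test_COVIDx.txt', name='COVIDNet', datadir='data',
         covid_weight=12., covid_percent=0.3, input_size=480, top_percent=0.08,
         in_tensorname='input_1:0', out_tensorname='norm_dense_1/Softmax:0',
         logit_tensorname='norm_dense_1/MatMul:0',
         label_tensorname='norm_dense_1_target:0',
         weights_tensorname='norm_dense_1_sample_weights:0',
         input_data_dir='data', output_data_dir='./output/')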
display_step = 1

# output path
outputPath = './output/'
runID = args.name + '-lr' + str(learning_rate)
runPath = outputPath + runID
pathlib.Path(runPath).mkdir(parents=True, exist_ok=True)
print('Output: ' + runPath)

with open(args.trainfile) as f:
    trainfiles = f.readlines()
with open(args.testfile) as f:
    testfiles = f.readlines()

generator = BalanceCovidDataset(data_dir=args.datadir,
                                csv_file=args.trainfile,
                                covid_percent=args.covid_percent,
                                class_weights=[1., 1., args.covid_weight])

with tf.Session() as sess:
    tf.get_default_graph()
    saver = tf.train.import_meta_graph(
        os.path.join(args.weightspath, args.metaname))
    graph = tf.get_default_graph()

    image_tensor = graph.get_tensor_by_name("input_1:0")
    labels_tensor = graph.get_tensor_by_name("dense_3_target:0")
    sample_weights = graph.get_tensor_by_name("dense_3_sample_weights:0")
    pred_tensor = graph.get_tensor_by_name("dense_3/MatMul:0")

    # loss expects unscaled logits since it performs a softmax on logits internally for efficiency
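    # A sketch of what typically follows, mirroring the full train_tf function
    # above: initialize variables, restore the pretrained checkpoint, and save
    # a baseline copy. args.ckptname is assumed to be defined by the argument
    # parser; it does not appear in this fragment.
    init = tf.global_variables_initializer()
    sess.run(init)

    saver.restore(sess, os.path.join(args.weightspath, args.ckptname))
    saver.save(sess, os.path.join(runPath, 'model'))
    print('Saved baseline checkpoint')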
                    default='norm_dense_1_sample_weights:0',
                    type=str,
                    help='Name of sample weights tensor for loss')
args = parser.parse_args()

# Parameters
learning_rate = args.lr
batch_size = args.bs
display_step = 1

generator_test = BalanceCovidDataset(data_dir=args.datadir,
                                     csv_file=args.testfile,
                                     is_training=False,
                                     batch_size=1,
                                     input_shape=(args.input_size, args.input_size),
                                     covid_percent=args.covid_percent,
                                     class_weights=[1., 1., 1.],
                                     top_percent=args.top_percent)

model = keras_model_build()
optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
model.compile(loss='categorical_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])  # loss is not yet corrected, nor is the metric
model.load_weights("./checkpoints_douczany_2/model.20/")
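# A minimal sketch of evaluating the restored weights on the test generator.
# The three-way unpacking (image, label, weight) is an assumption based on how
# the generator is consumed in the training loop above; generator_test uses
# batch_size=1, so each step scores a single sample.
num_correct = 0
num_total = len(generator_test)
for _ in range(num_total):
    batch_x, batch_y, _ = next(generator_test)
    pred = model.predict(batch_x)
    num_correct += int(np.argmax(pred, axis=1)[0] == np.argmax(batch_y, axis=1)[0])
print('Test accuracy: {:.4f}'.format(num_correct / num_total))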