Example #1
def dataupload():
    if request.method == "POST" and "image_data" in request.files:
        file = request.files["image_data"]
        filename = secure_filename(file.filename)

        fullfile = os.path.join("static", filename)
        file.save(fullfile)

        # Timestamp of the upload
        date = datetime.datetime.fromtimestamp(
            time.time()).strftime("%Y-%m-%d %H:%M:%S")

        # Read the image back and record its original shape
        img = cv2.imread(fullfile)
        df_shape = img.shape

        img = cv2.resize(img, (224, 224))
        img = np.reshape(img, [1, 224, 224, 3])

        # Placeholders (a single image is fed, so the batch dimension is 1)
        x = tf.placeholder(tf.float32, [1, 224, 224, 3])
        is_training = tf.placeholder('bool', [])

        # Model
        model = ResNetModel(is_training, depth=101, num_classes=num_classes)
        model.inference(x)
        prediction = tf.argmax(model.prob, 1)

        # saver=tf.train.Saver()
        with tf.Session() as sess:

            saver = tf.train.Saver()

            saver.restore(sess, model_path)
            idx = sess.run(prediction, feed_dict={x: img, is_training: False})
            label = label_dict[idx[0]]

        # Saving results of Uploaded files to Sqlite DB
        #newfile=FileContents(name=file.filename,modeldata=label)
        #db.session.add(newfile)
        #db.session.commit()

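    # Note: on a GET request (or if no file is attached) the names used below
    # are undefined; the original code assumes a successful POST upload.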
    return render_template('details.html',
                           filename=filename,
                           date=date,
                           df_shape=df_shape,
                           prediction=label,
                           image_file=filename)
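The handler above assumes module-level setup that the snippet does not show. A minimal sketch of those assumptions (the route path, model_path, num_classes and label_dict values are illustrative placeholders, not the original author's):

import os
import time
import datetime

import cv2
import numpy as np
import tensorflow as tf
from flask import Flask, request, render_template
from werkzeug.utils import secure_filename

app = Flask(__name__)

model_path = "checkpoints/model.ckpt"      # hypothetical path to a trained checkpoint
num_classes = 2                            # must match the checkpoint
label_dict = {0: "class_a", 1: "class_b"}  # class index -> display label (hypothetical)

# The handler itself would be bound with something like:
# @app.route("/dataupload", methods=["GET", "POST"])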
Example #2
def main(_):
    # Placeholders
    x = tf.placeholder(tf.float32, [FLAGS.batch_size, 224, 224, 3])
    y = tf.placeholder(tf.float32, [None, FLAGS.num_classes])
    is_training = tf.placeholder('bool', [])

    # Model
    model = ResNetModel(is_training,
                        depth=FLAGS.resnet_depth,
                        num_classes=FLAGS.num_classes)
    model.inference(x)

    # Accuracy of the model
    correct_pred = tf.equal(tf.argmax(model.prob, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    saver = tf.train.Saver()
    test_preprocessor = BatchPreprocessor(dataset_file_path=FLAGS.test_file,
                                          num_classes=FLAGS.num_classes,
                                          output_size=[224, 224])
    test_batches_per_epoch = np.floor(
        len(test_preprocessor.labels) / FLAGS.batch_size).astype(np.int16)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Restore directly (the model must exactly match the checkpoint)
        saver.restore(sess, FLAGS.ckpt)

        test_acc = 0.
        test_count = 0

        for _ in range(test_batches_per_epoch):
            batch_tx, batch_ty = test_preprocessor.next_batch(FLAGS.batch_size)
            acc = sess.run(accuracy,
                           feed_dict={
                               x: batch_tx,
                               y: batch_ty,
                               is_training: False
                           })
            test_acc += acc
            test_count += 1

        test_acc /= test_count
        print("{} Test Accuracy = {:.4f}".format(datetime.datetime.now(),
                                                 test_acc))
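These main(_) scripts read hyperparameters from a FLAGS object, which TF1 code of this vintage typically declares with tf.app.flags. A minimal sketch with illustrative defaults (the flag names follow the usages above):

import tensorflow as tf

tf.app.flags.DEFINE_integer('batch_size', 128, 'Batch size')
tf.app.flags.DEFINE_integer('num_classes', 2, 'Number of output classes')
tf.app.flags.DEFINE_integer('resnet_depth', 50, 'ResNet depth: 18, 34, 50, 101 or 152')
tf.app.flags.DEFINE_string('test_file', '../data/test.txt', 'Test dataset file')
tf.app.flags.DEFINE_string('ckpt', '', 'Checkpoint file to restore')
FLAGS = tf.app.flags.FLAGS

if __name__ == '__main__':
    tf.app.run()  # parses the flags, then calls main(_)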
Example #3
def predict(path, modelpath):
    with tf.Graph().as_default():
        # Placeholders
        x = tf.placeholder(tf.float32, [1, 224, 224, 3])
        is_training = tf.placeholder('bool', [])
        imgs = []
        # path='/home/ugrad/Shang/animal/1_.jpg'
        # image = cv2.imread(path,0)

        # cv2.imwrite(path,img)
        img = cv2.imread(path)
        img = cv2.resize(img, (224, 224))
        img = img.astype(np.float32)
        imgs.append(img)
        # img=Image.open(path)
        # img = np.array(img)
        # img = tf.cast(img, tf.float32)
        # img = tf.reshape(img, [1, 227, 227, 3])

        # Model
        model = ResNetModel(is_training,
                            depth=FLAGS.resnet_depth,
                            num_classes=FLAGS.num_classes)
        logits = model.inference(x)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            # Restore directly (the model must exactly match the checkpoint)
            # Load the pretrained weights
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, modelpath)
            prediction = sess.run(logits,
                                  feed_dict={
                                      x: imgs,
                                      is_training: False
                                  })
            # print(prediction)
            max_index = np.argmax(prediction)
            print(max_index)
        return max_index
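A minimal usage sketch for this function (paths are illustrative):

if __name__ == '__main__':
    class_index = predict('/path/to/image.jpg', '/path/to/model.ckpt')
    print('Predicted class index:', class_index)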
Example #4
def main(_):
    # Placeholders
    x = tf.placeholder(tf.float32, [1, 224, 224, 3])
    is_training = tf.placeholder('bool', [])

    # Model
    model = ResNetModel(is_training,
                        depth=FLAGS.resnet_depth,
                        num_classes=FLAGS.num_classes)
    model.inference(x)

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Restore directly (the model must exactly match the checkpoint)
        saver.restore(sess, FLAGS.ckpt)

        batch_x = np.zeros([1, 224, 224, 3], dtype=np.float32)

        # Read image and resize it
        img = cv2.imread(FLAGS.input_image)
        img = cv2.resize(img, (224, 224))
        img = img.astype(np.float32)

        # Subtract mean color
        img -= np.array([132.2766, 139.6506, 146.9702])

        batch_x[0] = img

        scores = sess.run(model.prob,
                          feed_dict={
                              x: batch_x,
                              is_training: False
                          })
        print(scores)
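Here scores is a (1, num_classes) array of class probabilities, so a caller would typically reduce it to a single class index, for example:

class_idx = int(np.argmax(scores, axis=1)[0])
print('Predicted class index:', class_idx)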
Example #5
def main(_):
    # Create training directories
    now = datetime.datetime.now()
    train_dir_name = now.strftime('resnet_%Y%m%d_%H%M%S')
    train_dir = os.path.join(FLAGS.tensorboard_root_dir, train_dir_name)
    checkpoint_dir = os.path.join(train_dir, 'checkpoint')
    tensorboard_dir = os.path.join(train_dir, 'tensorboard')
    tensorboard_train_dir = os.path.join(tensorboard_dir, 'train')
    tensorboard_val_dir = os.path.join(tensorboard_dir, 'val')

    if not os.path.isdir(FLAGS.tensorboard_root_dir):
        os.mkdir(FLAGS.tensorboard_root_dir)
    if not os.path.isdir(train_dir): os.mkdir(train_dir)
    if not os.path.isdir(checkpoint_dir): os.mkdir(checkpoint_dir)
    if not os.path.isdir(tensorboard_dir): os.mkdir(tensorboard_dir)
    if not os.path.isdir(tensorboard_train_dir):
        os.mkdir(tensorboard_train_dir)
    if not os.path.isdir(tensorboard_val_dir): os.mkdir(tensorboard_val_dir)

    # Write flags to txt
    flags_file_path = os.path.join(train_dir, 'flags.txt')
    flags_file = open(flags_file_path, 'w')
    flags_file.write('learning_rate={}\n'.format(FLAGS.learning_rate))
    flags_file.write('resnet_depth={}\n'.format(FLAGS.resnet_depth))
    flags_file.write('num_epochs={}\n'.format(FLAGS.num_epochs))
    flags_file.write('batch_size={}\n'.format(FLAGS.batch_size))
    flags_file.write('train_layers={}\n'.format(FLAGS.train_layers))
    flags_file.write('multi_scale={}\n'.format(FLAGS.multi_scale))
    flags_file.write('tensorboard_root_dir={}\n'.format(
        FLAGS.tensorboard_root_dir))
    flags_file.write('log_step={}\n'.format(FLAGS.log_step))
    flags_file.close()

    # Placeholders
    source = tf.placeholder(tf.float32, [FLAGS.batch_size, 224, 224, 3])
    target = tf.placeholder(tf.float32, [FLAGS.batch_size, 224, 224, 3])
    y = tf.placeholder(tf.float32, [None, FLAGS.num_classes])
    is_training = tf.placeholder('bool', [])

    par = tf.Variable(tf.constant(0.2), dtype=tf.float32)

    # Model
    train_layers = FLAGS.train_layers.split(',')
    source_model = ResNetModel(source,
                               is_training,
                               depth=FLAGS.resnet_depth,
                               num_classes=FLAGS.num_classes)
    target_model = ResNetModel(target,
                               is_training,
                               reuse=True,
                               depth=FLAGS.resnet_depth,
                               num_classes=FLAGS.num_classes)

    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=source_model.prob, labels=y)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
    loss = tf.add_n([cross_entropy_mean] + regularization_losses)
    # domain_loss=tf.maximum(0.0001,KMMD(source_model.avg_pool,target_model.avg_pool))
    domain_loss = coral_loss(source_model.avg_pool, target_model.avg_pool)
    centers_update_op, discriminative_loss = CenterBased(
        source_model.avg_pool, y)
    # domain_loss = mmatch(source_model.avg_pool,target_model.avg_pool, 5)
    # domain_loss = log_coral_loss(source_model.adapt, target_model.adapt)
    loss = loss + 1 * par * domain_loss + 0.03 * discriminative_loss

    # train_op = model.optimize(FLAGS.learning_rate, train_layers)
    Varall = tf.trainable_variables()
    # print(Varall)
    trainable_var_names = ['weights', 'biases', 'beta', 'gamma']
    var_list = [
        v for v in tf.trainable_variables()
        if v.name.split(':')[0].split('/')[-1] in trainable_var_names
        and contains(v.name, train_layers)
    ]
    optimizer = tf.train.AdamOptimizer(
        FLAGS.learning_rate)  #.minimize(loss, var_list=var_list)

    # ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
    # tf.add_to_collection(UPDATE_OPS_COLLECTION, ema.apply([loss]))

    # batchnorm_updates = tf.get_collection(UPDATE_OPS_COLLECTION)
    # batchnorm_updates_op = tf.group(*batchnorm_updates)
    # train_op=tf.group(train_op, batchnorm_updates_op)

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # total_op=tf.group(update_ops,centers_update_op)
    with tf.control_dependencies(update_ops):
        with tf.control_dependencies([centers_update_op]):
            train_op = optimizer.minimize(loss, var_list=var_list)

    # Training accuracy of the model
    correct_pred = tf.equal(tf.argmax(source_model.prob, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Summaries
    tf.summary.scalar('train_loss', loss)
    tf.summary.scalar('train_accuracy', accuracy)
    merged_summary = tf.summary.merge_all()

    train_writer = tf.summary.FileWriter(tensorboard_train_dir)
    val_writer = tf.summary.FileWriter(tensorboard_val_dir)
    saver = tf.train.Saver()

    # Batch preprocessors
    multi_scale = FLAGS.multi_scale.split(',')
    if len(multi_scale) == 2:
        multi_scale = [int(multi_scale[0]), int(multi_scale[1])]
    else:
        multi_scale = None

    train_preprocessor = BatchPreprocessor(
        dataset_file_path=FLAGS.training_file,
        num_classes=FLAGS.num_classes,
        output_size=[224, 224],
        horizontal_flip=False,
        shuffle=True,
        multi_scale=multi_scale)

    target_preprocessor = BatchPreprocessor(
        dataset_file_path='../data/webcam.txt',
        num_classes=FLAGS.num_classes,
        output_size=[224, 224],
        shuffle=True)

    val_preprocessor = BatchPreprocessor(dataset_file_path=FLAGS.val_file,
                                         num_classes=FLAGS.num_classes,
                                         output_size=[224, 224])

    # Get the number of training/validation steps per epoch
    train_batches_per_epoch = np.floor(
        len(train_preprocessor.labels) / FLAGS.batch_size).astype(np.int16)
    target_batches_per_epoch = np.floor(
        len(target_preprocessor.labels) / FLAGS.batch_size).astype(np.int16)
    val_batches_per_epoch = np.floor(
        len(val_preprocessor.labels) / FLAGS.batch_size).astype(np.int16)

    # train_batches_per_epoch=np.minimum(train_batches_per_epoch,target_batches_per_epoch)
    with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
            allow_growth=True))) as sess:
        varall = tf.trainable_variables()

        sess.run(tf.global_variables_initializer())
        train_writer.add_graph(sess.graph)

        # Load the pretrained weights
        source_model.load_original_weights(sess, skip_layers=train_layers)
        # target_model.load_original_weights(sess, skip_layers=train_layers)

        # Restore directly (the model must exactly match the checkpoint):
        # saver.restore(sess, "/Users/dgurkaynak/Projects/marvel-training/alexnet64-fc6/model_epoch10.ckpt")

        print("{} Start training...".format(datetime.datetime.now()))
        print("{} Open Tensorboard at --logdir {}".format(
            datetime.datetime.now(), tensorboard_dir))

        for epoch in range(FLAGS.num_epochs):
            print("{} Epoch number: {}".format(datetime.datetime.now(),
                                               epoch + 1))
            step = 1
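            # DANN-style schedule: ramp the domain-loss weight from 0 toward 1 over training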
            param = 2 / (1 + np.exp(-10 * (epoch) / FLAGS.num_epochs)) - 1
            print(param)
            sess.run(tf.assign(par, param))
            print(sess.run(par))

            # Start training
            while step < train_batches_per_epoch:
                if step % target_batches_per_epoch == 0:
                    target_preprocessor.reset_pointer()
                batch_xs, batch_ys = train_preprocessor.next_batch(
                    FLAGS.batch_size)
                batch_xt, batch_yt = target_preprocessor.next_batch(
                    FLAGS.batch_size)
                sess.run(train_op,
                         feed_dict={
                             source: batch_xs,
                             target: batch_xt,
                             y: batch_ys,
                             is_training: True
                         })

                # Logging
                # if step % FLAGS.log_step == 0:
                #     s = sess.run(merged_summary, feed_dict={source: batch_xs, y: batch_ys, is_training: False})
                #     train_writer.add_summary(s, epoch * train_batches_per_epoch + step)

                step += 1

            # Epoch completed, start validation
            print("{} Start validation".format(datetime.datetime.now()))
            test_acc = 0.
            test_count = 0

            for _ in range(val_batches_per_epoch):
                batch_tx, batch_ty = val_preprocessor.next_batch(
                    FLAGS.batch_size)
                acc = sess.run(accuracy,
                               feed_dict={
                                   source: batch_tx,
                                   y: batch_ty,
                                   is_training: False
                               })
                test_acc += acc
                test_count += 1

            test_acc /= test_count
            s = tf.Summary(value=[
                tf.Summary.Value(tag="validation_accuracy",
                                 simple_value=test_acc)
            ])
            val_writer.add_summary(s, epoch + 1)
            print("{} Validation Accuracy = {:.4f}".format(
                datetime.datetime.now(), test_acc))

            # Reset the dataset pointers
            val_preprocessor.reset_pointer()
            train_preprocessor.reset_pointer()
            target_preprocessor.reset_pointer()
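This example relies on helpers defined elsewhere in the repository. Plausible sketches of two of them follow; these are assumptions inferred from the call sites, not the original implementations. contains() appears to test whether a variable belongs to one of the trainable layers, and coral_loss() matches the standard Deep CORAL loss (Sun & Saenko, 2016):

def contains(target_str, search_arr):
    # Hypothetical helper: True if any trainable-layer name occurs in the variable name.
    return any(search_str in target_str for search_str in search_arr)

def coral_loss(source, target):
    # Deep CORAL sketch: squared Frobenius distance between the source and
    # target feature covariance matrices, scaled by 1 / (4 d^2).
    n_s = tf.cast(tf.shape(source)[0], tf.float32)
    n_t = tf.cast(tf.shape(target)[0], tf.float32)
    d = tf.cast(tf.shape(source)[1], tf.float32)
    source = source - tf.reduce_mean(source, axis=0, keepdims=True)
    target = target - tf.reduce_mean(target, axis=0, keepdims=True)
    cov_s = tf.matmul(source, source, transpose_a=True) / (n_s - 1.0)
    cov_t = tf.matmul(target, target, transpose_a=True) / (n_t - 1.0)
    return tf.reduce_sum(tf.square(cov_s - cov_t)) / (4.0 * d * d)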
Example #6
# Placeholders
is_training = tf.placeholder('bool', [])
x = tf.placeholder(tf.float32, [batch_size, im_m, im_n, 3])
base = tf.constant(np.full([1, 256, 256, 270], range(270)))
y = tf.placeholder(tf.int32, [batch_size, im_m, im_n, 1])
#y = tf.placeholder(tf.int32, [batch_size, im_m, im_n])
z = tf.placeholder(tf.int32, [batch_size, im_m, im_n])
z2 = tf.placeholder(tf.int32, [batch_size, 128, 128])
edge_num = tf.placeholder(tf.int32)
node_num = tf.placeholder(tf.int32)
receive = tf.placeholder(tf.int32, [None])
senders = tf.placeholder(tf.int32, [None])
edge_feature = tf.placeholder(tf.float32, [None, 1])
global_fe = tf.placeholder(tf.float32, [1, 32])
lr_ = tf.placeholder(tf.float32)
model = ResNetModel(x=x, is_training=is_training, depth=50)
loss, cross_entropy_mean, node, superlabel, attention = model.stage2_fuse_test(
    x, y, z, edge_num, receive, senders, edge_feature, node_num, base)
# loss, l1_map ,superlabel,loss1,loss2,loss3,loss4,loss5=model.stage2_fuse(x,y,z,z2,edge_num,receive,senders,edge_feature,node_num,base)
#g6,gl=model.stage2_loss(x,y,z,edge_num,receive,senders,edge_feature,node_num)
# f1,f2 = model.stage2_loss(x,y,z,edge_num,receive,senders,edge_feature,node_num)

update_op = model.optimize(learning_rate=lr_)

nm = np.array([1])

###########################################################

###########################################################
with tf.Session(config=config) as sess:
    coord = tf.train.Coordinator()
Example #7
def main(_):
    # Create training directories
    now = datetime.datetime.now()
    train_dir_name = now.strftime('resnet_%Y%m%d_%H%M%S')
    train_dir = os.path.join(FLAGS.tensorboard_root_dir, train_dir_name)
    checkpoint_dir = os.path.join(train_dir, 'checkpoint')
    tensorboard_dir = os.path.join(train_dir, 'tensorboard')
    tensorboard_train_dir = os.path.join(tensorboard_dir, 'train')
    tensorboard_val_dir = os.path.join(tensorboard_dir, 'val')

    if not os.path.isdir(FLAGS.tensorboard_root_dir):
        os.mkdir(FLAGS.tensorboard_root_dir)
    if not os.path.isdir(train_dir): os.mkdir(train_dir)
    if not os.path.isdir(checkpoint_dir): os.mkdir(checkpoint_dir)
    if not os.path.isdir(tensorboard_dir): os.mkdir(tensorboard_dir)
    if not os.path.isdir(tensorboard_train_dir):
        os.mkdir(tensorboard_train_dir)
    if not os.path.isdir(tensorboard_val_dir): os.mkdir(tensorboard_val_dir)

    # Write flags to txt
    flags_file_path = os.path.join(train_dir, 'flags.txt')
    flags_file = open(flags_file_path, 'w')
    flags_file.write('learning_rate={}\n'.format(FLAGS.learning_rate))
    flags_file.write('resnet_depth={}\n'.format(FLAGS.resnet_depth))
    flags_file.write('num_epochs={}\n'.format(FLAGS.num_epochs))
    flags_file.write('batch_size={}\n'.format(FLAGS.batch_size))
    flags_file.write('train_layers={}\n'.format(FLAGS.train_layers))

    flags_file.write('tensorboard_root_dir={}\n'.format(
        FLAGS.tensorboard_root_dir))
    flags_file.write('log_step={}\n'.format(FLAGS.log_step))
    flags_file.close()

    tra_Image_Data_Class = ImageData(FLAGS.batch_size, img_size, img_ch, True,
                                     FLAGS.num_classes)
    train_dataset = tf.data.Dataset.from_tensor_slices(
        (train_images, train_labels))
    train_dataset = train_dataset.map(
        tra_Image_Data_Class.image_processing,
        num_parallel_calls=8).shuffle(10000).prefetch(FLAGS.batch_size).batch(
            FLAGS.batch_size).repeat()

    train_iterator = train_dataset.make_initializable_iterator()
    tra_img, tra_lab = train_iterator.get_next()

    val_Image_Data_Class = ImageData(FLAGS.batch_size, img_size, img_ch, False,
                                     FLAGS.num_classes)
    val_dataset = tf.data.Dataset.from_tensor_slices((val_images, val_labels))
    val_dataset = val_dataset.map(
        val_Image_Data_Class.image_processing,
        num_parallel_calls=8).shuffle(10000).prefetch(FLAGS.batch_size).batch(
            FLAGS.batch_size).repeat()

    val_iterator = val_dataset.make_initializable_iterator()
    val_img, val_lab = val_iterator.get_next()

    # Model
    train_layers = FLAGS.train_layers.split(',')
    model = ResNetModel(is_training,
                        depth=FLAGS.resnet_depth,
                        num_classes=FLAGS.num_classes)
    loss = model.loss(x, y)
    train_op = model.optimize(FLAGS.learning_rate, train_layers)

    # Training accuracy of the model
    correct_pred = tf.equal(tf.argmax(model.prob, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Summaries
    tf.summary.scalar('train_loss', loss)
    tf.summary.scalar('train_accuracy', accuracy)
    merged_summary = tf.summary.merge_all()

    train_writer = tf.summary.FileWriter(tensorboard_train_dir)
    val_writer = tf.summary.FileWriter(tensorboard_val_dir)
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        train_writer.add_graph(sess.graph)

        sess.run(train_iterator.initializer,
                 feed_dict={
                     image: train_images,
                     label: train_labels,
                     batch_size: FLAGS.batch_size
                 })
        # Load the pretrained weights
        model.load_original_weights(sess, skip_layers=train_layers)

        print("{} Start training...".format(datetime.datetime.now()))
        for epoch in range(FLAGS.num_epochs):
            step = 1
            print("{} Epoch number: {}".format(datetime.datetime.now(),
                                               epoch + 1))
            for idx in range(tra_num_batches):
                batch_x, batch_y = sess.run([tra_img, tra_lab])

                _, tra_loss, tra_acc = sess.run([train_op, loss, accuracy],
                                                feed_dict={
                                                    x: batch_x,
                                                    y: batch_y,
                                                    is_training: True
                                                })

                if step % FLAGS.log_step == 0:
                    #print("[ Epoch:%d ],Step:%d,** loss:%f,accuracy:%f **"%(epoch,step,tra_loss,tra_acc))
                    s = sess.run(merged_summary,
                                 feed_dict={
                                     x: batch_x,
                                     y: batch_y,
                                     is_training: False
                                 })
                    train_writer.add_summary(s, epoch * tra_num_batches + step)
                step += 1

            print("{} Start validation".format(datetime.datetime.now()))
            test_acc = 0.
            test_count = 0

            sess.run(val_iterator.initializer,
                     feed_dict={
                         image: val_images,
                         label: val_labels,
                         batch_size: FLAGS.batch_size
                     })
            for _ in range(val_num_batches):
                batch_x, batch_y = sess.run([val_img, val_lab])
                val_loss, val_acc = sess.run([loss, accuracy],
                                             feed_dict={
                                                 x: batch_x,
                                                 y: batch_y,
                                                 is_training: False
                                             })

                test_acc += val_acc
                test_count += 1

            test_acc /= test_count
            s = tf.Summary(value=[
                tf.Summary.Value(tag="validation_accuracy",
                                 simple_value=test_acc)
            ])
            val_writer.add_summary(s, epoch + 1)

            print("{} Validation Accuracy = {:.4f}".format(
                datetime.datetime.now(), test_acc))

            print("{} Saving checkpoint of model...".format(
                datetime.datetime.now()))
            #save checkpoint of the model
            checkpoint_path = os.path.join(
                checkpoint_dir, 'model_epoch' + str(epoch + 1) + '.ckpt')
            save_path = saver.save(sess, checkpoint_path)
            print("{} Model checkpoint saved at {}".format(
                datetime.datetime.now(), checkpoint_path))
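The snippet above references several names it never defines: x, y, is_training, the image/label/batch_size feed placeholders, the in-memory arrays (train_images, train_labels, val_images, val_labels), and the per-epoch batch counts. A minimal sketch of plausible definitions (assumptions, not the original code):

img_size, img_ch = 224, 3

# Network inputs
x = tf.placeholder(tf.float32, [None, img_size, img_size, img_ch])
y = tf.placeholder(tf.float32, [None, FLAGS.num_classes])
is_training = tf.placeholder('bool', [])

# Feed placeholders for initializing the tf.data pipelines; the train_*/val_*
# arrays are assumed to be numpy arrays loaded elsewhere.
image = tf.placeholder(tf.float32, shape=train_images.shape)
label = tf.placeholder(tf.float32, shape=train_labels.shape)
batch_size = tf.placeholder(tf.int64)

tra_num_batches = len(train_labels) // FLAGS.batch_size
val_num_batches = len(val_labels) // FLAGS.batch_size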
Example #8
def main(_):
    # Create training directories
    now = datetime.datetime.now()
    train_dir_name = now.strftime('resnet_%Y%m%d_%H%M%S')
    train_dir = os.path.join(FLAGS.tensorboard_root_dir, train_dir_name)
    checkpoint_dir = os.path.join(train_dir, 'checkpoint')
    tensorboard_dir = os.path.join(train_dir, 'tensorboard')
    tensorboard_train_dir = os.path.join(tensorboard_dir, 'train')
    tensorboard_val_dir = os.path.join(tensorboard_dir, 'val')

    if not os.path.isdir(FLAGS.tensorboard_root_dir): os.mkdir(FLAGS.tensorboard_root_dir)
    if not os.path.isdir(train_dir): os.mkdir(train_dir)
    if not os.path.isdir(checkpoint_dir): os.mkdir(checkpoint_dir)
    if not os.path.isdir(tensorboard_dir): os.mkdir(tensorboard_dir)
    if not os.path.isdir(tensorboard_train_dir): os.mkdir(tensorboard_train_dir)
    if not os.path.isdir(tensorboard_val_dir): os.mkdir(tensorboard_val_dir)

    # Write flags to txt
    flags_file_path = os.path.join(train_dir, 'flags.txt')
    flags_file = open(flags_file_path, 'w')
    flags_file.write('learning_rate={}\n'.format(FLAGS.learning_rate))
    flags_file.write('resnet_depth={}\n'.format(FLAGS.resnet_depth))
    flags_file.write('num_epochs={}\n'.format(FLAGS.num_epochs))
    flags_file.write('batch_size={}\n'.format(FLAGS.batch_size))
    flags_file.write('train_layers={}\n'.format(FLAGS.train_layers))
    flags_file.write('multi_scale={}\n'.format(FLAGS.multi_scale))
    flags_file.write('tensorboard_root_dir={}\n'.format(FLAGS.tensorboard_root_dir))
    flags_file.write('log_step={}\n'.format(FLAGS.log_step))
    flags_file.close()

    # Placeholders
    source = tf.placeholder(tf.float32, [FLAGS.batch_size, 224, 224, 3])
    target = tf.placeholder(tf.float32, [FLAGS.batch_size, 224, 224, 3])
    y = tf.placeholder(tf.float32, [None, FLAGS.num_classes])
    is_training = tf.placeholder('bool', [])
    dropout_rate = tf.placeholder(dtype=tf.float32, shape=None)

    domain_loss_param = tf.get_variable(name="domain_loss_param", dtype=tf.float32, initializer=tf.constant(1.0), trainable=False)
    target_loss_param = tf.get_variable(name='target_loss_param', dtype=tf.float32, initializer=tf.constant(0.0), trainable=False)
    logits_threshold = tf.get_variable(name='logits_threshold', dtype=tf.float32, initializer=tf.constant(0.0), trainable=False)
    ring_norm = tf.get_variable(name="fc/ring_norm", shape=None, dtype=tf.float32, initializer=tf.constant(100.0), trainable=False)
    clustering_param = tf.get_variable(name='Ortho_loss_param', dtype=tf.float32, initializer=tf.constant(0.0), trainable=False)

    # Model
    train_layers = FLAGS.train_layers.split(',')
    source_model = ResNetModel(source, is_training, depth=FLAGS.resnet_depth, dropout_rate=dropout_rate, num_classes=FLAGS.num_classes)
    target_model = ResNetModel(target, is_training, reuse=True, depth=FLAGS.resnet_depth, dropout_rate=dropout_rate, num_classes=FLAGS.num_classes)
    # fc_weights=tf.get_default_graph().get_tensor_by_name("fc/weights:0")
    # Orthogonal_regularizer=tf.reduce_mean(tf.norm(tf.matmul(tf.transpose(fc_weights),fc_weights)-tf.eye(FLAGS.num_classes),ord=2))
    # Grad_loss=GradRegularization(target_model.prob,target_model.avg_pool)

    # Calculate the loss function
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=source_model.prob, labels=y)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    regularization_losses = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
    source_loss = cross_entropy_mean + 1.0 * regularization_losses
    domain_loss = HoMM(source_model.adapt, target_model.adapt, order=4, num=300000)
    target_loss = Target_loss(tf.nn.softmax(target_model.prob), logits_threshold)
    centers_update_op, discriminative_loss, source_centers = CenterBased(source_model.adapt, y)
    target_feature, target_logits = SelectTargetSamples(target_model.adapt, target_model.prob, logits_threshold=0.75)
    target_predict_label = tf.argmax(target_logits, axis=1)
    target_pseudo_label = tf.one_hot(target_predict_label, FLAGS.num_classes)
    with tf.variable_scope('target'):
        centers_update_op_1, discriminative_clustering, target_centers = CenterBased(target_feature, target_pseudo_label)
    # class_domain_loss = AlignCenter(centers_update_op, centers_update_op_1)
    ring_loss = Cal_RingLoss(ring_norm, source_model.avg_pool, target_model.avg_pool)


    # office: 1000, 0.01, 0.0003  ## Office-Home: 1000, 0.01, 0.001
    loss = source_loss + 200 * domain_loss_param * domain_loss + clustering_param * discriminative_clustering

    # train_op = model.optimize(FLAGS.learning_rate, train_layers)
    Varall = tf.trainable_variables()
    # print(Varall)
    trainable_var_names = ['weights', 'biases', 'beta', 'gamma', 'adapt']
    # var_list_1 = [v for v in tf.trainable_variables() if v.name.split(':')[0].split('/')[-1] in trainable_var_names and contains(v.name, train_layers)]
    var_list_1 = [var for var in tf.trainable_variables() if 'scale5/block3' in var.name]
    var_list_2 = [var for var in tf.trainable_variables() if 'fc' in var.name or 'adapt' in var.name]
    var_list_3 = [var for var in tf.trainable_variables() if 'scale5/block2' in var.name]

    optimizer1 = tf.train.AdamOptimizer(FLAGS.learning_rate)
    optimizer2 = tf.train.AdamOptimizer(learning_rate=0.0003)
    # optimizer3 = tf.train.AdamOptimizer(learning_rate=0.000005)

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        with tf.control_dependencies([centers_update_op, centers_update_op_1]):
            op1 = optimizer1.minimize(loss, var_list=var_list_1)
            op2 = optimizer2.minimize(loss, var_list=var_list_2)
            # op3 = optimizer3.minimize(loss, var_list=var_list_3)
            train_op = tf.group(op1, op2)

    # Training accuracy of the model
    correct_pred = tf.equal(tf.argmax(source_model.prob, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Summaries
    tf.summary.scalar('train_loss', loss)
    tf.summary.scalar('train_accuracy', accuracy)
    merged_summary = tf.summary.merge_all()

    train_writer = tf.summary.FileWriter(tensorboard_train_dir)
    val_writer = tf.summary.FileWriter(tensorboard_val_dir)
    saver = tf.train.Saver()

    # Batch preprocessors
    multi_scale = FLAGS.multi_scale.split(',')
    if len(multi_scale) == 2:
        multi_scale = [int(multi_scale[0]), int(multi_scale[1])]
    else:
        multi_scale = None

    train_preprocessor = BatchPreprocessor(dataset_file_path=FLAGS.training_file, num_classes=FLAGS.num_classes,
                                           output_size=[224, 224], horizontal_flip=False, shuffle=True, multi_scale=multi_scale)

    target_preprocessor = BatchPreprocessor(dataset_file_path='../data/webcam.txt', num_classes=FLAGS.num_classes,output_size=[224, 224],shuffle=True)

    val_preprocessor = BatchPreprocessor(dataset_file_path=FLAGS.val_file, num_classes=FLAGS.num_classes, output_size=[224, 224])

    # Get the number of training/validation steps per epoch
    train_batches_per_epoch = np.floor(len(train_preprocessor.labels) / FLAGS.batch_size).astype(np.int16)
    target_batches_per_epoch = np.floor(len(target_preprocessor.labels) / FLAGS.batch_size).astype(np.int16)
    val_batches_per_epoch = np.floor(len(val_preprocessor.labels) / FLAGS.batch_size).astype(np.int16)

    # train_batches_per_epoch=np.minimum(train_batches_per_epoch,target_batches_per_epoch)
    with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))) as sess:
        varall = tf.trainable_variables()
        sess.run(tf.global_variables_initializer())
        train_writer.add_graph(sess.graph)
        # Load the pretrained weights
        source_model.load_original_weights(sess, skip_layers=train_layers)
        # target_model.load_original_weights(sess, skip_layers=train_layers)

        # Restore directly (the model must exactly match the checkpoint):
        # saver.restore(sess, "/Users/dgurkaynak/Projects/marvel-training/alexnet64-fc6/model_epoch10.ckpt")

        print("{} Start training...".format(datetime.datetime.now()))
        print("{} Open Tensorboard at --logdir {}".format(datetime.datetime.now(), tensorboard_dir))


        Acc_convergency = []
        for epoch in range(FLAGS.num_epochs):
            print("{} Epoch number: {}".format(datetime.datetime.now(), epoch + 1))
            step = 1
            # Start training
            while step < train_batches_per_epoch:
                if step % target_batches_per_epoch == 0:
                    target_preprocessor.reset_pointer()
                batch_xs, batch_ys = train_preprocessor.next_batch(FLAGS.batch_size)
                batch_xt, batch_yt = target_preprocessor.next_batch(FLAGS.batch_size)
                TotalLoss, SourceLoss, DomainLoss, TargetLoss, RingLoss, _ = sess.run(
                    fetches=[loss, source_loss, domain_loss, target_loss, ring_loss, train_op],
                    feed_dict={source: batch_xs, target: batch_xt, y: batch_ys, is_training: True, dropout_rate: 1.0})

                # Print the component losses
                print("Loss={} ### SourceLoss={} ### DomainLoss={} ### TargetLoss={} ### RingLoss={}".format(TotalLoss, SourceLoss, DomainLoss, TargetLoss, RingLoss))

                # Logging
                # if step % FLAGS.log_step == 0:
                #     s = sess.run(merged_summary, feed_dict={source: batch_xs, y: batch_ys, is_training: False})
                #     train_writer.add_summary(s, epoch * train_batches_per_epoch + step)

                step += 1

            if epoch % 3 == 0:

                # Epoch completed, start validation
                print("{} Start validation".format(datetime.datetime.now()))
                test_acc = 0.
                test_count = 0

                for _ in range(val_batches_per_epoch):
                    batch_tx, batch_ty = val_preprocessor.next_batch(FLAGS.batch_size)
                    acc = sess.run(accuracy, feed_dict={source: batch_tx, y: batch_ty, is_training: False, dropout_rate: 1.0})
                    test_acc += acc
                    test_count += 1

                test_acc /= test_count
                s = tf.Summary(value=[tf.Summary.Value(tag="validation_accuracy", simple_value=test_acc)])
                val_writer.add_summary(s, epoch + 1)
                print("{} Validation Accuracy = {:.4f}".format(datetime.datetime.now(), test_acc))
                Acc_convergency.append(test_acc)
                print(Acc_convergency)


            if epoch == 100:
                sess.run(tf.assign(clustering_param, 0.0))

            # Reset the dataset pointers
            val_preprocessor.reset_pointer()
            train_preprocessor.reset_pointer()
            target_preprocessor.reset_pointer()

        # Log the convergence data
        savedata = np.array(Acc_convergency)
        np.save("AtoD_SDDA_Source.npy", savedata)
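SelectTargetSamples() is not defined in the snippet; from its call site it appears to keep only the target samples whose prediction confidence exceeds a threshold. A plausible sketch (an assumption, not the original implementation):

def SelectTargetSamples(features, logits, logits_threshold):
    # Keep target samples whose maximum softmax probability exceeds the threshold.
    probs = tf.nn.softmax(logits)
    mask = tf.reduce_max(probs, axis=1) > logits_threshold
    return tf.boolean_mask(features, mask), tf.boolean_mask(logits, mask)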
Example #9
if args.enc_model == "pnas":
    print("PNAS Model")
    from model import PNASModel
    model = PNASModel(train_enc=bool(args.train_enc),
                      load_weight=args.load_weight)

elif args.enc_model == "densenet":
    print("DenseNet Model")
    from model import DenseModel
    model = DenseModel(train_enc=bool(args.train_enc),
                       load_weight=args.load_weight)

elif args.enc_model == "resnet":
    print("ResNet Model")
    from model import ResNetModel
    model = ResNetModel(train_enc=bool(args.train_enc),
                        load_weight=args.load_weight)

elif args.enc_model == "vgg":
    print("VGG Model")
    from model import VGGModel
    model = VGGModel(train_enc=bool(args.train_enc),
                     load_weight=args.load_weight)

elif args.enc_model == "mobilenet":
    print("Mobile NetV2")
    from model import MobileNetV2
    model = MobileNetV2(train_enc=bool(args.train_enc),
                        load_weight=args.load_weight)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if torch.cuda.device_count() > 1:
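    # The snippet is truncated here; the matching pattern in Example #13 below
    # suggests the usual continuation:
    model = nn.DataParallel(model)
model = model.to(device)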
Example #10
def main(_):
    # Create training directories
    now = datetime.datetime.now()
    train_dir_name = now.strftime('resnet_%Y%m%d_%H%M%S')
    train_dir = os.path.join(FLAGS.tensorboard_root_dir, train_dir_name)
    checkpoint_dir = os.path.join(train_dir, 'checkpoint')
    tensorboard_dir = os.path.join(train_dir, 'tensorboard')
    tensorboard_train_dir = os.path.join(tensorboard_dir, 'train')
    tensorboard_val_dir = os.path.join(tensorboard_dir, 'val')

    if not os.path.isdir(FLAGS.tensorboard_root_dir): os.mkdir(FLAGS.tensorboard_root_dir)
    if not os.path.isdir(train_dir): os.mkdir(train_dir)
    if not os.path.isdir(checkpoint_dir): os.mkdir(checkpoint_dir)
    if not os.path.isdir(tensorboard_dir): os.mkdir(tensorboard_dir)
    if not os.path.isdir(tensorboard_train_dir): os.mkdir(tensorboard_train_dir)
    if not os.path.isdir(tensorboard_val_dir): os.mkdir(tensorboard_val_dir)

    # Write flags to txt
    flags_file_path = os.path.join(train_dir, 'flags.txt')
    flags_file = open(flags_file_path, 'w')
    flags_file.write('learning_rate={}\n'.format(FLAGS.learning_rate))
    flags_file.write('resnet_depth={}\n'.format(FLAGS.resnet_depth))
    flags_file.write('num_epochs={}\n'.format(FLAGS.num_epochs))
    flags_file.write('batch_size={}\n'.format(FLAGS.batch_size))
    flags_file.write('train_layers={}\n'.format(FLAGS.train_layers))
    flags_file.write('multi_scale={}\n'.format(FLAGS.multi_scale))
    flags_file.write('tensorboard_root_dir={}\n'.format(FLAGS.tensorboard_root_dir))
    flags_file.write('log_step={}\n'.format(FLAGS.log_step))
    flags_file.close()

    # Placeholders
    x = tf.placeholder(tf.float32, [FLAGS.batch_size, 224, 224, 3])
    y = tf.placeholder(tf.float32, [None, FLAGS.num_classes])
    is_training = tf.placeholder('bool', [])

    # Model
    train_layers = FLAGS.train_layers.split(',')
    model = ResNetModel(is_training, depth=FLAGS.resnet_depth, num_classes=FLAGS.num_classes)
    loss = model.loss(x, y)
    train_op = model.optimize(FLAGS.learning_rate, train_layers)

    # Training accuracy of the model
    correct_pred = tf.equal(tf.argmax(model.prob, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Summaries
    tf.summary.scalar('train_loss', loss)
    tf.summary.scalar('train_accuracy', accuracy)
    merged_summary = tf.summary.merge_all()

    train_writer = tf.summary.FileWriter(tensorboard_train_dir)
    val_writer = tf.summary.FileWriter(tensorboard_val_dir)
    saver = tf.train.Saver()

    # Batch preprocessors
    multi_scale = FLAGS.multi_scale.split(',')
    if len(multi_scale) == 2:
        multi_scale = [int(multi_scale[0]), int(multi_scale[1])]
    else:
        multi_scale = None

    train_preprocessor = BatchPreprocessor(dataset_file_path=FLAGS.training_file, num_classes=FLAGS.num_classes,
                                           output_size=[224, 224], horizontal_flip=True, shuffle=True, multi_scale=multi_scale)
    val_preprocessor = BatchPreprocessor(dataset_file_path=FLAGS.val_file, num_classes=FLAGS.num_classes, output_size=[224, 224])

    # Get the number of training/validation steps per epoch
    train_batches_per_epoch = np.floor(len(train_preprocessor.labels) / FLAGS.batch_size).astype(np.int16)
    val_batches_per_epoch = np.floor(len(val_preprocessor.labels) / FLAGS.batch_size).astype(np.int16)


    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        train_writer.add_graph(sess.graph)

        # Load the pretrained weights
        model.load_original_weights(sess, skip_layers=train_layers)

        # Restore directly (the model must exactly match the checkpoint):
        # saver.restore(sess, "/Users/dgurkaynak/Projects/marvel-training/alexnet64-fc6/model_epoch10.ckpt")

        print("{} Start training...".format(datetime.datetime.now()))
        print("{} Open Tensorboard at --logdir {}".format(datetime.datetime.now(), tensorboard_dir))

        for epoch in range(FLAGS.num_epochs):
            print("{} Epoch number: {}".format(datetime.datetime.now(), epoch+1))
            step = 1

            # Start training
            while step < train_batches_per_epoch:
                batch_xs, batch_ys = train_preprocessor.next_batch(FLAGS.batch_size)
                sess.run(train_op, feed_dict={x: batch_xs, y: batch_ys, is_training: True})

                # Logging
                if step % FLAGS.log_step == 0:
                    s = sess.run(merged_summary, feed_dict={x: batch_xs, y: batch_ys, is_training: False})
                    train_writer.add_summary(s, epoch * train_batches_per_epoch + step)

                step += 1

            # Epoch completed, start validation
            print("{} Start validation".format(datetime.datetime.now()))
            test_acc = 0.
            test_count = 0

            for _ in range(val_batches_per_epoch):
                batch_tx, batch_ty = val_preprocessor.next_batch(FLAGS.batch_size)
                acc = sess.run(accuracy, feed_dict={x: batch_tx, y: batch_ty, is_training: False})
                test_acc += acc
                test_count += 1

            test_acc /= test_count
            s = tf.Summary(value=[
                tf.Summary.Value(tag="validation_accuracy", simple_value=test_acc)
            ])
            val_writer.add_summary(s, epoch+1)
            print("{} Validation Accuracy = {:.4f}".format(datetime.datetime.now(), test_acc))

            # Reset the dataset pointers
            val_preprocessor.reset_pointer()
            train_preprocessor.reset_pointer()
            
            #if epoch % FLAGS.num_epochs == 0:

            # Save checkpoint of the model
            print("{} Saving checkpoint of model...".format(datetime.datetime.now()))
            checkpoint_path = os.path.join(checkpoint_dir, 'model_epoch'+str(epoch+1)+'.ckpt')
            save_path = saver.save(sess, checkpoint_path)
            print("{} Model checkpoint saved at {}".format(datetime.datetime.now(), checkpoint_path))
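All of these training scripts assume the same small interface on the BatchPreprocessor helper. A sketch of that interface as used here (an outline of the assumed API, not the real class, which also handles resizing, flipping, mean subtraction and multi-scale cropping):

class BatchPreprocessor(object):
    """Interface assumed by the training loops above (sketch only)."""

    def __init__(self, dataset_file_path, num_classes, output_size,
                 horizontal_flip=False, shuffle=False, multi_scale=None):
        self.labels = []  # one entry per sample; len(self.labels) sizes an epoch

    def next_batch(self, batch_size):
        # Returns (images, one_hot_labels) numpy arrays of shape
        # [batch_size, 224, 224, 3] and [batch_size, num_classes].
        raise NotImplementedError

    def reset_pointer(self):
        # Rewind (and optionally re-shuffle) the dataset for the next epoch.
        raise NotImplementedError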
Example #11
    return image, label, A
##################################################################################################################
train_csv = tf.train.string_input_producer(['DUTS-na.csv'])
train_image, train_label, A = read_csv(train_csv, augmentation=False)
X_train_batch_op, y_train_batch_op = tf.train.shuffle_batch([train_image, train_label], batch_size=batch_size,
                                                                capacity=batch_size * 2,
                                                                min_after_dequeue=batch_size * 1,
                                                                allow_smaller_final_batch=True)
##################################################################################################################
# Model
# Placeholders
is_training = tf.placeholder('bool', [])
x = tf.placeholder(tf.float32, [batch_size, im_m, im_n, 3])
y = tf.placeholder(tf.int32, [batch_size, im_m, im_n])
lr_ = tf.placeholder(tf.float32)
model = ResNetModel(x=x, is_training=is_training, depth=50)
loss, concat_up, lossf_mean, lap_loss = model.DGR(x, y, loss_weight=0.0)
update_op = model.optimize(learning_rate=lr_)

upscore_fuse = concat_up[:, :, :, 1]
with tf.Session(config=config) as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    #writer = tf.summary.FileWriter('log/logs', sess.graph)
    sess.run(tf.global_variables_initializer())
    # Load the pretrained weights
    #model.load_original_weights(sess, skip_layers=['fc'])
    ###################################################################################
    tf.train.Saver(var_list=tf.global_variables()).restore(sess, 'base/new_bn/4')
    logfile = open("logfile.txt", 'a')
    print("load Resnet.npy", file=logfile)
Example #12
edge_label = tf.placeholder(tf.int32, [None])

z = tf.placeholder(tf.int32, [batch_size, im_m, im_n])

edge_num = tf.placeholder(tf.int32)
node_num = tf.placeholder(tf.int32)
receive = tf.placeholder(tf.int32, [None])
senders = tf.placeholder(tf.int32, [None])
edge_feature = tf.placeholder(tf.float32, [None, 1])
global_fe = tf.placeholder(tf.float32, [1, 32])
gf = np.zeros([1, 32])
lr_ = tf.placeholder(tf.float32)

#label_in = create_label(y,z)

model = ResNetModel(x=x, is_training=is_training, depth=50)

loss, loss1_mean, loss2_mean, score1, score2 = model.stage2_loss_test(
    x, y1, edge_label, z, edge_num, receive, senders, edge_feature, node_num,
    global_fe)
#g6,gl=model.stage2_loss(x,y,z,edge_num,receive,senders,edge_feature,node_num)
# f1,f2 = model.stage2_loss(x,y,z,edge_num,receive,senders,edge_feature,node_num)

update_op = model.optimize(learning_rate=lr_)

nm = np.array([1])

###########################################################

###########################################################
with tf.Session(config=config) as sess:
Example #13
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

if args.enc_model == "pnas":
    print("PNAS Model")
    from model import PNASModel
    model = PNASModel()

elif args.enc_model == "densenet":
    print("DenseNet Model")
    from model import DenseModel
    model = DenseModel()

elif args.enc_model == "resnet":
    print("ResNet Model")
    from model import ResNetModel
    model = ResNetModel()

elif args.enc_model == "vgg":
    print("VGG Model")
    from model import VGGModel
    model = VGGModel()

elif args.enc_model == "mobilenet":
    print("Mobile NetV2")
    from model import MobileNetV2
    model = MobileNetV2()

if args.enc_model != "mobilenet":
    model = nn.DataParallel(model)
model.load_state_dict(torch.load(args.model_val_path))
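After load_state_dict, inference code would typically move the model to the device and switch it to eval mode; a minimal sketch:

model = model.to(device)
model.eval()  # disable dropout and use running batch-norm statistics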
Example #14
def main(_):
    # Create training directories
    now = datetime.datetime.now()
    train_dir_name = now.strftime('resnet_%Y%m%d_%H%M%S')
    train_dir = os.path.join(FLAGS.tensorboard_root_dir, train_dir_name)
    checkpoint_dir = os.path.join(train_dir, 'checkpoint')
    tensorboard_dir = os.path.join(train_dir, 'tensorboard')
    tensorboard_train_dir = os.path.join(tensorboard_dir, 'train')
    tensorboard_val_dir = os.path.join(tensorboard_dir, 'val')

    if not os.path.isdir(FLAGS.tensorboard_root_dir):
        os.mkdir(FLAGS.tensorboard_root_dir)
    if not os.path.isdir(train_dir): os.mkdir(train_dir)
    if not os.path.isdir(checkpoint_dir): os.mkdir(checkpoint_dir)
    if not os.path.isdir(tensorboard_dir): os.mkdir(tensorboard_dir)
    if not os.path.isdir(tensorboard_train_dir):
        os.mkdir(tensorboard_train_dir)
    if not os.path.isdir(tensorboard_val_dir): os.mkdir(tensorboard_val_dir)

    # Write flags to txt
    flags_file_path = os.path.join(train_dir, 'flags.txt')
    flags_file = open(flags_file_path, 'w')
    flags_file.write('learning_rate={}\n'.format(FLAGS.learning_rate))
    flags_file.write('resnet_depth={}\n'.format(FLAGS.resnet_depth))
    flags_file.write('num_epochs={}\n'.format(FLAGS.num_epochs))
    flags_file.write('batch_size={}\n'.format(FLAGS.batch_size))
    flags_file.write('train_layers={}\n'.format(FLAGS.train_layers))
    flags_file.write('multi_scale={}\n'.format(FLAGS.multi_scale))
    flags_file.write('tensorboard_root_dir={}\n'.format(
        FLAGS.tensorboard_root_dir))
    flags_file.write('log_step={}\n'.format(FLAGS.log_step))
    flags_file.close()

    # Placeholders
    #x = tf.placeholder(tf.float32, [FLAGS.batch_size, 224, 224, 3], name='input')
    x = tf.placeholder(tf.float32, [None, 224, 224, 3], name='input')
    y = tf.placeholder(tf.float32, [None, FLAGS.num_classes])
    is_training = tf.placeholder('bool', [], name='trainval')

    # Model
    train_layers = FLAGS.train_layers.split(',')
    model = ResNetModel(is_training,
                        depth=FLAGS.resnet_depth,
                        num_classes=FLAGS.num_classes)
    loss = model.loss(x, y)
    train_op = model.optimize(FLAGS.learning_rate, train_layers)

    # Link variable to model output
    predict = model.prob
    output = tf.nn.softmax(predict, name='output')

    # Training accuracy of the model
    correct_pred = tf.equal(tf.argmax(model.prob, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Summaries
    tf.summary.scalar('train_loss', loss)
    tf.summary.scalar('train_accuracy', accuracy)
    merged_summary = tf.summary.merge_all()

    train_writer = tf.summary.FileWriter(tensorboard_train_dir)
    val_writer = tf.summary.FileWriter(tensorboard_val_dir)
    saver = tf.train.Saver()

    # Batch preprocessors
    multi_scale = FLAGS.multi_scale.split(',')
    if len(multi_scale) == 2:
        multi_scale = [int(multi_scale[0]), int(multi_scale[1])]
    else:
        multi_scale = None

    train_preprocessor = BatchPreprocessor(
        dataset_file_path=FLAGS.training_file,
        num_classes=FLAGS.num_classes,
        output_size=[224, 224],
        horizontal_flip=False,
        shuffle=True,
        multi_scale=multi_scale)
    val_preprocessor = BatchPreprocessor(dataset_file_path=FLAGS.val_file,
                                         num_classes=FLAGS.num_classes,
                                         output_size=[224, 224])

    # Get the number of training/validation steps per epoch
    train_batches_per_epoch = np.floor(
        len(train_preprocessor.labels) / FLAGS.batch_size).astype(np.int16)
    val_batches_per_epoch = np.floor(
        len(val_preprocessor.labels) / FLAGS.batch_size).astype(np.int16)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        train_writer.add_graph(sess.graph)

        # Load the pretrained weights
        #model.load_original_weights(sess, skip_layers=train_layers)

        # Restore directly (the model must exactly match the checkpoint):
        # saver.restore(sess, "/Users/dgurkaynak/Projects/marvel-training/alexnet64-fc6/model_epoch10.ckpt")

        print("{} Start training...".format(datetime.datetime.now()))
        print("{} Open Tensorboard at --logdir {}".format(
            datetime.datetime.now(), tensorboard_dir))

        for epoch in range(FLAGS.num_epochs):
            print("{} Epoch number: {}".format(datetime.datetime.now(),
                                               epoch + 1))
            step = 1

            # Start training
            while step < train_batches_per_epoch:
                batch_xs, batch_ys = train_preprocessor.next_batch(
                    FLAGS.batch_size)
                sess.run(train_op,
                         feed_dict={
                             x: batch_xs,
                             y: batch_ys,
                             is_training: True
                         })

                # Logging
                if step % FLAGS.log_step == 0:
                    train_loss, train_acc, s = sess.run(
                        [loss, accuracy, merged_summary],
                        feed_dict={
                            x: batch_xs,
                            y: batch_ys,
                            is_training: False
                        })
                    train_writer.add_summary(
                        s, epoch * train_batches_per_epoch + step)
                    print(
                        "Iter {}/{}, training mini-batch loss = {:.5f}, training accuracy = {:.5f}"
                        .format(step * FLAGS.batch_size,
                                train_batches_per_epoch * FLAGS.batch_size,
                                train_loss, train_acc))

                step += 1

            # Epoch completed, start validation
            print("{} Start validation".format(datetime.datetime.now()))
            test_acc = 0.
            test_count = 0
            test_loss = 0
            t1 = time.time()
            for i in range(val_batches_per_epoch):
                batch_tx, batch_ty = val_preprocessor.next_batch(
                    FLAGS.batch_size)
                val_loss, val_acc, val_out = sess.run([loss, accuracy, output],
                                                      feed_dict={
                                                          x: batch_tx,
                                                          y: batch_ty,
                                                          is_training: False
                                                      })
                test_acc += val_acc
                test_loss += val_loss
                test_count += 1

                y_true = np.argmax(batch_ty, 1)
                y_pre = np.argmax(val_out, 1)
                #print(len(y_true),len(y_pre))
                #for k in range(FLAGS.batch_size):
                #    if not (y_pre[k] == 0 or y_pre[k] == 1):
                #        y_pre[k] = 0
                #if i == 0:
                #    conf_matrix = confusion_matrix(y_true, y_pre)
                #else:
                #    conf_matrix += confusion_matrix(y_true, y_pre)
                #conf_matrix = confusion_matrix(y_true, y_pre)
                #print(i, conf_matrix)

                if i == 0:
                    all_pred_y = y_pre
                    all_real_y = y_true
                else:
                    all_pred_y = np.concatenate((all_pred_y, y_pre), axis=0)
                    all_real_y = np.concatenate((all_real_y, y_true), axis=0)

            test_acc /= test_count
            test_loss /= test_count
            t2 = time.time() - t1
            s = tf.Summary(value=[
                tf.Summary.Value(tag="validation_accuracy",
                                 simple_value=test_acc)
            ])
            val_writer.add_summary(s, epoch + 1)
            print("{} Validation Accuracy = {:.4f}, loss = {:.4f}".format(
                datetime.datetime.now(), test_acc, test_loss))
            print("Test image {:.4f}ms per image".format(
                t2 * 1000 / (val_batches_per_epoch * FLAGS.batch_size)))
            conf_matrix = confusion_matrix(all_real_y, all_pred_y)
            print(conf_matrix.ravel())
            #y_batch_predict = np.zeros((val_batches_per_epoch*FLAGS.batch_size, FLAGS.num_classes))
            #for j in range(val_batches_per_epoch*FLAGS.batch_size):
            #    y_batch_predict[j][all_pred_y[j]] = 1
            class_report = classification_report(all_real_y, all_pred_y)
            print(class_report)
            # Reset the dataset pointers
            val_preprocessor.reset_pointer()
            train_preprocessor.reset_pointer()

            print("{} Saving checkpoint of model...".format(
                datetime.datetime.now()))

            #save checkpoint of the model
            checkpoint_path = os.path.join(
                checkpoint_dir, 'model_epoch' + str(epoch + 1) + '.ckpt')
            save_path = saver.save(sess, checkpoint_path)

            print("{} Model checkpoint saved at {}".format(
                datetime.datetime.now(), checkpoint_path))
Example #15
                                     num_classes=args.num_classes,
                                     output_size=[224, 224])
#batch_size=32
batch_size = args.batch_size
val_num_batches = np.floor(len(val_preprocessor.labels) / batch_size).astype(
    np.int16)

# In[4]:

# Placeholders
x = tf.placeholder(tf.float32, [None, 224, 224, 3])
y = tf.placeholder(tf.float32, [None, args.num_classes])
is_training = tf.placeholder('bool', [])

# Model
model = ResNetModel(is_training, depth=101, num_classes=args.num_classes)
model.inference(x)

# In[9]:

saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, checkpoint_path)

    avg_pool = sess.graph.get_tensor_by_name("avg_pool:0")

    label_onehot = np.zeros(
        (val_num_batches, args.batch_size, args.num_classes))
    features = np.zeros((val_num_batches, args.batch_size, 2048))
    #features=[]
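    # A plausible continuation (not in the original snippet), assuming the
    # truncated preprocessor above is named val_preprocessor: collect pooled
    # features and one-hot labels batch by batch.
    for i in range(val_num_batches):
        batch_x, batch_y = val_preprocessor.next_batch(batch_size)
        features[i] = sess.run(avg_pool, feed_dict={x: batch_x, is_training: False})
        label_onehot[i] = batch_y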