def train_new_model(model, train_queue, valid_queue, test_queue):
    ori_model = model.module if args.distributed else model
    optimizer = get_optimizer(model, args)
    scheduler = get_scheduler(optimizer, args)
    drop_layers = ori_model.drop_layers()
    criterion = get_criterion(args.classes, args.label_smoothing)

    for epoch in range(args.epochs):
        scheduler.step()  # advance the LR schedule at the start of each epoch (pre-1.1 PyTorch convention)
        if args.warmup and epoch < args.warmup_epochs:
            lr = args.learning_rate * epoch / args.warmup_epochs + args.warmup_lr
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
            cond_logging('epoch %d lr %e', epoch, lr)
        else:
            lr = scheduler.get_lr()[0]
            cond_logging('epoch %d lr %e', epoch, lr)

        if args.distributed:
            train_queue.sampler.set_epoch(epoch)
        if args.epd:
            # epoch-proportional drop: ramp the drop rate linearly over training
            drop_rate = args.drop_rate * epoch / args.epochs
        else:
            drop_rate = args.drop_rate
        drop_rates = [drop_rate] * drop_layers
        if args.layerd:
            # layer-wise scaling: deeper layers receive a larger drop rate
            for i in range(drop_layers):
                drop_rates[i] = drop_rates[i] * (i + 1) / drop_layers
        ori_model.set_drop_rates(drop_rates)
        cond_logging('drop rates:')
        cond_logging(ori_model.drop_rates)

        # training
        train_acc, train_obj = train(train_queue, model, criterion, optimizer,
                                     lr, args.report_freq, args.world_size,
                                     args.distributed, args.local_rank)

        cond_logging('train acc %f', train_acc)
        # validation (drop rates are zeroed so evaluation runs without dropout)
        drop_rates = [0] * drop_layers
        ori_model.set_drop_rates(drop_rates)
        valid_acc, valid_obj = infer(valid_queue, model, criterion,
                                     args.report_freq, args.world_size,
                                     args.distributed, args.local_rank)
        cond_logging('valid acc %f', valid_acc)
        test_acc, test_obj = infer(test_queue, model, criterion,
                                   args.report_freq, args.world_size,
                                   args.distributed, args.local_rank)
        cond_logging('test acc %f', test_acc)
    return model
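The epoch-proportional (epd) and layer-wise (layerd) drop-rate scheduling above can be isolated into a small helper for readability; a minimal sketch, where the function name and standalone form are assumptions and the arithmetic simply mirrors the loop above:

def scheduled_drop_rates(base_rate, epoch, total_epochs, num_layers,
                         epoch_proportional=True, layer_proportional=True):
    """Reproduce the drop-rate schedule used in train_new_model above (sketch)."""
    # Ramp the base rate linearly over training when epoch-proportional decay is on.
    rate = base_rate * epoch / total_epochs if epoch_proportional else base_rate
    rates = [rate] * num_layers
    if layer_proportional:
        # Deeper layers receive a proportionally larger drop rate.
        rates = [rate * (i + 1) / num_layers for i in range(num_layers)]
    return rates

# e.g. scheduled_drop_rates(0.2, epoch=30, total_epochs=100, num_layers=4)
# -> [0.015, 0.03, 0.045, 0.06]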
Example #2
def main(argv):
    frames = 16
    batch_size = 1
    input_shape = [320, 180, 3]
    # Build into a separate name: rebinding the local name "model" would shadow
    # the module-level "model" and raise UnboundLocalError before build() runs.
    net = model.build(input_shape, None, None, frames=frames)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        iterations = 100
        input_shape = net.input_shape.as_list()[1:]
        print("Creating inputs with shape", input_shape)
        start_time = time.monotonic()
        for i in range(iterations):
            output = net.infer(sess,
                               {net.input: [np.random.rand(*input_shape)]})
            print(output)

        end_time = time.monotonic()
        total_time = end_time - start_time
        inference_time = total_time / iterations

        print(iterations, "inferences in", total_time, "s")
        print(inference_time, "s per inference")
        print(1 / inference_time, "inferences per second")
        print("up to", frames / inference_time,
              "frames per second without skip")
Example #3
def start_page():
    if request.method == 'POST':
        context = request.form['context']
        question = request.form['question']
        result = model.infer(context=context, question=question)
        return result

    else:
        return render_template('start.html', name=None)
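The view above is shown without its surrounding Flask boilerplate. A minimal sketch of how such a view is typically registered follows; the app object, route path, allowed methods, and the way model is loaded are assumptions, not taken from the original source:

from flask import Flask, request, render_template

app = Flask(__name__)
# model = ...  # assumed: the QA model is loaded once at import time

@app.route('/', methods=['GET', 'POST'])  # hypothetical route and methods
def start_page():
    ...  # body as in the example above

if __name__ == '__main__':
    app.run(debug=True)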
Example #4
def hello_world():
    print('request received')
    model = load()
    content = request.get_json()
    query = content['query']
    # TODO try-except blocks to check that everything is correct
    print('returning ans')
    output = {'answer': infer(model, query)}
    return jsonify(output)
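A hedged example of calling such a JSON endpoint from a client; the host, port, and route path are placeholders for wherever the Flask app is actually served:

import requests

# Hypothetical endpoint; the handler above expects {"query": ...} and returns {"answer": ...}.
resp = requests.post('http://localhost:5000/', json={'query': 'What is inference?'})
print(resp.json()['answer'])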
Example #5
def api_v1_infer():
    """API function: run the chatbot on the posted text, falling back to a prompt."""
    j = request.get_json()
    default_txt = "Please write something"
    # Guard against a missing JSON body and a missing or empty 'text' field.
    text = j.get('text', '') if j is not None else ''
    r = infer(text) if text else default_txt
    print(r)
    return jsonify(dict(input='YOU: ' + text, output='CHATBOT: ' + r))
Example #6
from util import to_spec, to_wav_file, bss_eval_sdr
import librosa
from statistics import median
import numpy as np  # needed for the zero-filled input batch below
import tensorflow as tf  # needed for the placeholders and the session below
# NetConfig_DSD_100 and infer() come from the surrounding project (not shown in this excerpt).

batchSize = 1

# Model
print('Initialize network model')
with tf.device('/device:GPU:0'):
    x_mixed = tf.placeholder(tf.float32,
                             shape=(batchSize, 512, 64, 1),
                             name='x_mixed')
    y_mixed = tf.placeholder(tf.float32,
                             shape=(batchSize, 512, 64, 2),
                             name='y_mixed')
    y_pred = infer(x_mixed, 4)
    #y_output = tf.multiply(x_mixed,y_pred)
    net = tf.make_template('net', y_pred)

x_input = np.zeros((batchSize, 512, 64, 1), dtype=np.float32)
#y_input = np.zeros((batchSize, 512, 64, 2),dtype=np.float32)

sdr_vocal = []
sdr_other = []
sdr_bass = []
sdr_drum = []

with tf.Session(config=NetConfig_DSD_100.session_conf) as sess:

    # Initialized, Load state
    sess.run(tf.global_variables_initializer())
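    # --- Hypothetical continuation (not part of the original excerpt) ---
    # A checkpoint restore would normally happen here before evaluation; as a
    # minimal sketch, push the zero-filled batch through the graph once to
    # check that the pipeline runs. The output shape depends on how infer()
    # is defined in the surrounding project.
    pred = sess.run(y_pred, feed_dict={x_mixed: x_input})
    print('prediction shape:', pred.shape)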
Example #7
def main(params):

    with tf.Session() as sess:
        model = infer(sess, params)
        model.test(params)
Example #8
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-ckpt', '--checkpoint_path', type=str, required=True,
                        help='trained checkpoint path')
    parser.add_argument('--model_name', type=str, default='345K',
                        help='model (architecture) name')
    parser.add_argument("--device", type=int, default=1)
    parser.add_argument("--seed", type=int, default=None)
    parser.add_argument("--nsamples", type=int, default=2)
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--tok_length", type=int, default=128)
    parser.add_argument("--sent_length", type=int, default=3)
    parser.add_argument("--top_k", type=int, default=0)
    parser.add_argument("--top_p", type=float, default=0.0)
    # The default context is a sample Korean news sentence: "North Korea's projectile
    # launch is the fourth in 13 days, since it fired what appeared to be a
    # short-range ballistic missile on the 25th of last month."
    parser.add_argument("--context", type=str, default="북한의 발사체 도발은 지난달 25일 단거리 탄도미사일로 추정되는 발사체를 쏜 이후 13일 동안 4번째입니다.")
    args = parser.parse_args()

    model = GPT(args.checkpoint_path,
                args.model_name,
                args.device,
                args.seed,
                args.nsamples,
                args.batch_size,
                args.tok_length,
                args.sent_length,
                args.top_k,
                args.top_p)

    out = model.infer(args.context)
    print(out)
Example #9
    parser.add_argument(
        '--output_dir',
        help='GCS location to write checkpoints and export models',
        required=True)
    parser.add_argument('--train_steps',
                        help='How many batches to run training job for',
                        type=int,
                        default=20000)

    args = parser.parse_args()
    arguments = args.__dict__

    # unused args provided by service
    arguments.pop('job_dir', None)
    arguments.pop('job-dir', None)

    output_dir = arguments.pop('output_dir')
    model.init(arguments.pop('train_steps'))

    # Append trial_id to path if we are doing hptuning
    # This code can be removed if you are not using hyperparameter tuning
    output_dir = os.path.join(
        output_dir,
        json.loads(os.environ.get('TF_CONFIG', '{}')).get('task',
                                                          {}).get('trial', ''))

    # Run the training job
    #shutil.rmtree(output_dir, ignore_errors=True) # start fresh each time
    learn_runner.run(model.experiment_fn, output_dir)
    model.infer(output_dir)
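The trial-id handling above can be read more easily as a standalone helper; a sketch assuming the same TF_CONFIG convention (the function name is hypothetical):

import json
import os

def append_trial_id(output_dir):
    """Append the hyperparameter-tuning trial id (if any) to the output path."""
    # TF_CONFIG is set by the training service; outside of hptuning the task
    # dict carries no trial id and the path is returned unchanged.
    tf_config = json.loads(os.environ.get('TF_CONFIG', '{}'))
    trial = tf_config.get('task', {}).get('trial', '')
    return os.path.join(output_dir, trial)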
def do_nearest(model, sess, stream_input, restore_path, dataset, split, k=4):
    """
  From the encodings of the images, find the k nearest neighbors of each image.

  Args:
    model            :         model
    sess             :         tensorflow session
    stream_input     :         data manager
    restore_path     :         where is the restore file
    dataset          :         which dataset
    split            :         which split (valid, test, etc.)
    k                :         number of nearest neighbors to keep
  """

    print('------------------------------------------------------')
    print('Computing the %d nearest neighbors of images of %s from %s ...' %
          (k, dataset, split))
    print('------------------------------------------------------\n')

    images, labels, name = stream_input.get_inputs()
    guess, _, inter_feature = model.infer(images, inter_layer=True)

    num_iter = int(stream_input.size_epoch / stream_input.batch_size)
    num_examples = num_iter * stream_input.batch_size
    dimension = inter_feature.get_shape()[1].value

    index_representation = np.empty((num_examples), dtype='a1000')
    representation = np.zeros((num_examples, dimension))
    closest = np.zeros((num_examples, k + 1))

    # Initialisation of weights
    print('------------------------------------------------------')
    sess.run(tf.global_variables_initializer())
    print('Restoring from previous checkpoint...')
    ret = restore_model(model, sess, restore_path)
    tf.train.start_queue_runners(sess=sess)
    stream_input.start_threads(sess)
    print('------------------------------------------------------')
    print('Done! Training ended at step %s' % (ret))
    print('------------------------------------------------------ \n')

    print('------------------------------------------------------')
    print('There are %d data to process in %d iterations' %
          (num_examples, num_iter))
    print('------------------------------------------------------ \n')

    for step in range(num_iter):

        sys.stdout.write('%d out of %d    \r' % (step, num_iter))
        sys.stdout.flush()

        code, name_ret = sess.run([inter_feature, name])
        offset = step * stream_input.batch_size
        representation[offset:offset + stream_input.batch_size, :] = code
        for i in range(stream_input.batch_size):
            index_representation[offset + i] = name_ret[i]

    print('------------------------------------------------------ ')
    print('Step 1: Done!')
    print('------------------------------------------------------ \n')

    path = restore_path + '/' + model.name
    path += '/neighbour_' + dataset + '_' + split + '/'
    if tf.gfile.Exists(path):
        tf.gfile.DeleteRecursively(path)
    tf.gfile.MakeDirs(path)

    for i in range(num_examples):

        sys.stdout.write('%d out of %d    \r' % (i, num_examples))
        sys.stdout.flush()

        # Squared Euclidean distance from example i to every other example
        ret = np.reshape(np.tile(representation[i], num_examples),
                         (num_examples, dimension))
        distance = np.sum(np.square(representation - ret), axis=1)
        closest[i] = np.argsort(distance)[0:k + 1]

        base_name = str(index_representation[i]).split('.')[0]
        shutil.copy2(
            os.path.join(stream_input.f1, str(index_representation[i])),
            path + base_name + '_im' + '.jpg')
        for j in range(1, k + 1):  # j = 0 is the image itself
            neighbour_name = str(index_representation[int(closest[i, j])])
            shutil.copy2(
                os.path.join(stream_input.f1, neighbour_name),
                path + base_name + '_neighbour_' + str(j) + '_' +
                neighbour_name.split('.')[0] + '.jpg')

    print('------------------------------------------------------ ')
    print('Step 2: Done!')
    print('------------------------------------------------------ \n')
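The per-example distance loop above tiles the query vector once per image; an equivalent vectorized formulation in NumPy is sketched below (same squared-Euclidean ranking, computed for all queries at once; the helper name is an assumption):

import numpy as np

def k_nearest(representation, k):
    """Indices of the k nearest neighbours (plus the point itself) for every row."""
    # Pairwise squared Euclidean distances: ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2
    sq_norms = np.sum(representation ** 2, axis=1)
    distances = (sq_norms[:, None]
                 - 2.0 * representation @ representation.T
                 + sq_norms[None, :])
    # Column 0 of the argsort is the point itself (distance ~0), as in the loop above.
    return np.argsort(distances, axis=1)[:, :k + 1]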
def compute_inter(model, sess, stream_input, restore_path, dataset, split,
                  arithmetic):
    """
  From the real encodings of images, compute new encodings and save the final results.

  Args:
    model            :         model
    sess             :         tensorflow session
    stream_input     :         data manager
    restore_path     :         where is the restore file
    dataset          :         which dataset
    split            :         which split (valid, test, etc.)
    arithmetic       :         how to transform the encoding (1 is addition, 2 is subtraction, 3 is linear combination)
  """

    print('------------------------------------------------------')
    print('Computing operations on encoding of images of %s from %s ...' %
          (dataset, split))
    print('------------------------------------------------------')
    if arithmetic == 1:
        print('Operation is addition')
    elif arithmetic == 2:
        print('Operation is subtraction')
    elif arithmetic == 3:
        print('Operation is linear combination')
    print('------------------------------------------------------\n')

    images, labels, name = stream_input.get_inputs()
    guess, _, _ = model.infer(images, arithmetic=arithmetic)

    # Initialisation of weights
    print('------------------------------------------------------')
    sess.run(tf.global_variables_initializer())
    print('Restoring from previous checkpoint...')
    ret_bis = restore_model(model, sess, restore_path)
    tf.train.start_queue_runners(sess=sess)
    stream_input.start_threads(sess)
    print('------------------------------------------------------')
    print('Done! Training ended at step %s' % (ret_bis))
    print('------------------------------------------------------ \n')

    path = restore_path + '/' + model.name
    path += '/results_arith_' + dataset + '_' + split + '_' + str(
        arithmetic) + '/'
    if tf.gfile.Exists(path):
        tf.gfile.DeleteRecursively(path)
    tf.gfile.MakeDirs(path)

    num_iter = int(stream_input.size_epoch / stream_input.batch_size)
    print('------------------------------------------------------')
    print('There are %d data to process in %d iterations' %
          (int(stream_input.size_epoch), num_iter))
    print('------------------------------------------------------ \n')

    for step in range(num_iter):

        sys.stdout.write('%d out of %d    \r' % (step, num_iter))
        sys.stdout.flush()

        images_ret, labels_ret, guess_ret, name_ret = sess.run(
            [images, labels, guess, name])

        for i in range(model.batch_size):

            ret_path = path + str(i + step * model.batch_size) + '_' + str(
                name_ret[i])
            ret = np.asarray(images_ret[i] * 255, dtype="int8")
            Image.fromarray(ret, 'RGB').save(ret_path + '_' + 'im' + '.jpg')
            ret = np.asarray(labels_ret[i] * 255, dtype="int8")
            Image.fromarray(ret, 'P').save(ret_path + '_' + 'lab' + '.png')
            ret = np.asarray(guess_ret[i] * 255, dtype="int8")
            Image.fromarray(ret, 'P').save(ret_path + '_' + 'guess' + '.png')

    print('------------------------------------------------------ ')
    print('Done!')
    print('------------------------------------------------------ \n')
def visual_tracking(model, sess, restore_path, folder_data):
    """
  Naively compute the tracking bounding box for each frame of the given sequence.

  Args:
    model            :         model
    sess             :         tensorflow session
    restore_path     :         where is the restore file
    folder_data      :         the sequence location
  """

    print('------------------------------------------------------')
    print('Performing tracking on given sequence %s ...' % (folder_data))
    print('------------------------------------------------------\n')

    batch_size = model.batch_size
    images = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3))
    guess, _, _ = model.infer(images)
    zeros = tf.zeros_like(guess)
    ones = tf.ones_like(guess)
    threshold = 2 * tf.reduce_mean(guess, keep_dims=True)  #Adaptive threshold
    output = tf.select(guess > threshold, ones, zeros)

    # Initialisation of weights
    print('------------------------------------------------------')
    sess.run(tf.global_variables_initializer())
    print('Restoring from previous checkpoint...')
    ret_bis = restore_model(model, sess, restore_path)
    print('------------------------------------------------------')
    print('Done! Training ended at step %s' % (ret_bis))
    print('------------------------------------------------------ \n')

    path = folder_data + '/results_tracking/'  #Output folder
    if tf.gfile.Exists(path):
        tf.gfile.DeleteRecursively(path)
    tf.gfile.MakeDirs(path)
    tf.gfile.MakeDirs(path + 'out/')
    tf.gfile.MakeDirs(path + 'binary/')
    tf.gfile.MakeDirs(path + 'bb/')

    images_batch = np.empty((0, 224, 224, 3))
    index_representation = np.empty((batch_size), dtype='a1000')
    index = 0
    current = 0
    tot = len([f for f in os.listdir(folder_data + '/img/') if ".jpg" in f])

    print('------------------------------------------------------')
    for e in np.array(
        [f for f in os.listdir(folder_data + '/img/') if ".jpg" in f]):

        current += 1
        sys.stdout.write('%d out of %d    \r' % (current, tot))
        sys.stdout.flush()

        # Read the images one by one and add them to the batch
        im = Image.open(os.path.join(folder_data + '/img/', e))
        im.load()
        w, h = im.size
        im = im.resize((224, 224))
        im_a = np.asarray(im, dtype="int8")
        images_batch = np.append(images_batch, [im_a], axis=0)
        index_representation[index] = e

        index += 1
        if index == batch_size:  #Ready to be processed

            guess_ret, threshold_ret, output_ret = sess.run(
                [guess, threshold, output],
                feed_dict={images: images_batch / 255})

            for i in range(batch_size):
                ret_out = np.asarray(guess_ret[i] * 255,
                                     dtype="int8")  #Score output
                Image.fromarray(ret_out, 'P').save(path + 'out/' +
                                                   index_representation[i] +
                                                   '_out' + '.png')

                ret_out_b = np.asarray(output_ret[i] * 255,
                                       dtype="int8")  #Binary output
                Image.fromarray(ret_out_b, 'P').save(path + 'binary/' +
                                                     index_representation[i] +
                                                     '_binary' + '.png')

                im = Image.fromarray(
                    np.asarray(images_batch[i], dtype=np.uint8), 'RGB')
                draw = ImageDraw.Draw(im)  #Bounding box
                ret_mask = np.nonzero(ret_out_b)
                x0 = np.amin(ret_mask, axis=1)[1]
                y0 = np.amin(ret_mask, axis=1)[0]
                x1 = np.amax(ret_mask, axis=1)[1]
                y1 = np.amax(ret_mask, axis=1)[0]
                draw.rectangle([x0, y0, x1, y1], outline='red')
                im_b = im.resize((2 * w, 2 * h))
                im_b.save(path + 'bb/' + index_representation[i] + '_final' +
                          '.png')
                del draw

            images_batch = np.empty((0, 224, 224, 3))
            index_representation = np.empty((batch_size), dtype='a1000')
            index = 0
    print('------------------------------------------------------ \n')

    if index != 0:  #Last batch not processed

        number = index
        while index != batch_size:
            im = np.zeros((224, 224, 3))
            images_batch = np.append(images_batch, [im], axis=0)
            index += 1

        guess_ret, threshold_ret, output_ret = sess.run(
            [guess, threshold, output], feed_dict={images: images_batch / 255})

        for i in range(number):
            ret_out = np.asarray(guess_ret[i] * 255,
                                 dtype="int8")  #Score output
            Image.fromarray(ret_out,
                            'P').save(path + 'out/' + index_representation[i] +
                                      '_out' + '.png')

            ret_out_b = np.asarray(output_ret[i] * 255,
                                   dtype="int8")  #Binary output
            Image.fromarray(ret_out_b, 'P').save(path + 'binary/' +
                                                 index_representation[i] +
                                                 '_binary' + '.png')

            im = Image.fromarray(np.asarray(images_batch[i], dtype=np.uint8),
                                 'RGB')
            draw = ImageDraw.Draw(im)  #Bounding box
            ret_mask = np.nonzero(ret_out_b)
            x0 = np.amin(ret_mask, axis=1)[1]
            y0 = np.amin(ret_mask, axis=1)[0]
            x1 = np.amax(ret_mask, axis=1)[1]
            y1 = np.amax(ret_mask, axis=1)[0]
            draw.rectangle([x0, y0, x1, y1], outline='red')
            im_b = im.resize((2 * w, 2 * h))
            im_b.save(path + 'bb/' + index_representation[i] + '_final' +
                      '.png')
            del draw

    print('------------------------------------------------------ ')
    print('Done!')
    print('------------------------------------------------------ \n')
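The bounding-box extraction from the binary mask appears twice above (once for full batches and once for the final partial batch); a sketch of it as a small helper, with an extra guard for an empty mask that the code above does not handle (the helper name is an assumption):

import numpy as np
from PIL import Image, ImageDraw

def draw_mask_bbox(image_array, binary_mask, outline='red'):
    """Draw the tight bounding box of the non-zero mask region onto the image (sketch)."""
    im = Image.fromarray(np.asarray(image_array, dtype=np.uint8), 'RGB')
    ys, xs = np.nonzero(binary_mask)
    if len(xs) > 0:
        draw = ImageDraw.Draw(im)
        draw.rectangle([int(xs.min()), int(ys.min()), int(xs.max()), int(ys.max())],
                       outline=outline)
    return im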
def do_train(model,
             sess,
             stream_input,
             stream_input_aux,
             max_step,
             log_folder,
             mode,
             weight_file=None,
             model_to_copy=None,
             model_copy_is_concat=False,
             valid=True,
             dataset=None,
             save_copy=False):
    """
  Train the model.

  Args:
    model                :         model to train
    sess                 :         tensorflow session
    stream_input         :         data manager
    stream_input_aux     :         data manager for the auxiliary dataset (validation)
    max_step             :         number of steps to train
    log_folder           :         where the log of the model is
    mode                 :         how to initialize the weights
    weight_file          :         location of the vgg model if pretraining
    model_to_copy        :         weights of the model to copy when restoring weights only (from another model)
    model_copy_is_concat :         whether the model to copy has direct connections
    valid                :         whether to run validation
    dataset              :         auxiliary dataset used during validation
    save_copy            :         whether to also save a weights-only copy of the model at the end
  """

    print('------------------------------------------------------')
    print('Starting training the model (number of steps is %d) ...' %
          (max_step))
    print('------------------------------------------------------\n')

    global_step = tf.Variable(0, trainable=False)
    images, labels, _ = stream_input.get_inputs()
    guess, control, _ = model.infer(images, debug=True)
    loss = model.loss(guess, labels)
    train_op = model.train(loss, global_step)

    if valid:
        images_aux, labels_aux, _ = stream_input_aux.get_inputs()
        guess_aux, _, _ = model.infer(images_aux)
        loss_aux = model.loss(guess_aux, labels_aux, loss_bis=True)
        zeros = tf.zeros_like(labels_aux)
        ones = tf.ones_like(labels_aux)
        threshold = [i for i in range(255)]
        liste = []

        for thres in threshold:
            predicted_class = tf.select(guess_aux * 255 > thres, ones, zeros)
            true_positive = tf.reduce_sum(
                tf.cast(
                    tf.logical_and(tf.equal(predicted_class, ones),
                                   tf.equal(labels_aux, ones)), tf.float32),
                [1, 2])
            false_positive = tf.reduce_sum(
                tf.cast(
                    tf.logical_and(tf.equal(predicted_class, ones),
                                   tf.equal(labels_aux, zeros)), tf.float32),
                [1, 2])
            true_negative = tf.reduce_sum(
                tf.cast(
                    tf.logical_and(tf.equal(predicted_class, zeros),
                                   tf.equal(labels_aux, zeros)), tf.float32),
                [1, 2])
            false_negative = tf.reduce_sum(
                tf.cast(
                    tf.logical_and(tf.equal(predicted_class, zeros),
                                   tf.equal(labels_aux, ones)), tf.float32),
                [1, 2])
            precision = tf.reduce_sum(true_positive /
                                      (1e-8 + true_positive + false_positive))
            recall = tf.reduce_sum(true_positive /
                                   (1e-8 + true_positive + false_negative))
            liste.append(tf.pack([precision, recall]))
        result = tf.pack(liste)
        adaptive_threshold = (
            2 * tf.reduce_mean(guess_aux, [0, 1], keep_dims=True))
        adaptive_output = tf.select(guess_aux > adaptive_threshold, ones,
                                    zeros)
        adaptive_true_positive = tf.reduce_sum(
            tf.cast(
                tf.logical_and(tf.equal(adaptive_output, ones),
                               tf.equal(labels_aux, ones)), tf.float32),
            [1, 2])
        adaptive_false_positive = tf.reduce_sum(
            tf.cast(
                tf.logical_and(tf.equal(adaptive_output, ones),
                               tf.equal(labels_aux, zeros)), tf.float32),
            [1, 2])
        adaptive_true_negative = tf.reduce_sum(
            tf.cast(
                tf.logical_and(tf.equal(adaptive_output, zeros),
                               tf.equal(labels_aux, zeros)), tf.float32),
            [1, 2])
        adaptive_false_negative = tf.reduce_sum(
            tf.cast(
                tf.logical_and(tf.equal(adaptive_output, zeros),
                               tf.equal(labels_aux, ones)), tf.float32),
            [1, 2])
        adaptive_precision = tf.reduce_sum(
            adaptive_true_positive /
            (1e-8 + adaptive_true_positive + adaptive_false_positive))
        adaptive_recall = tf.reduce_sum(
            adaptive_true_positive /
            (1e-8 + adaptive_true_positive + adaptive_false_negative))
        adaptive_f_measure = tf.reduce_sum(
            1.3 * adaptive_precision * adaptive_recall /
            (1e-8 + 0.3 * adaptive_precision + adaptive_recall))

    # Initialisation of weights
    print('------------------------------------------------------')
    sess.run(tf.global_variables_initializer())
    if mode == 'pretrain':
        print('Loading weights from vgg file...')
        load_weights(model, sess, weight_file)
    elif mode == 'restore':
        print('Restoring from previous checkpoint...')
        sess.run(
            global_step.assign(int(restore_model(model, sess, log_folder))))
    elif mode == 'restore_w_only':
        print('Restoring (weights only) from model %s ...' % (model_to_copy))
        restore_weight_from(model,
                            model_to_copy,
                            sess,
                            log_folder,
                            copy_concat=model_copy_is_concat)
    elif mode == 'scratch':
        print('Initializing the weights from scratch')
    print('------------------------------------------------------')
    print('Done!')
    print('------------------------------------------------------ \n')

    tf.train.start_queue_runners(sess=sess)
    stream_input.start_threads(sess)

    if valid:
        stream_input_aux.start_threads(sess)
        if tf.gfile.Exists(log_folder + '/' + model.name + '_validation_log'):
            tf.gfile.DeleteRecursively(log_folder + '/' + model.name +
                                       '_validation_log')
        tf.gfile.MakeDirs(log_folder + '/' + model.name + '_validation_log')

    for step in range(max_step):
        start_time = time.time()
        _, loss_value, control_value, step_b = sess.run(
            [train_op, loss, control,
             tf.to_int32(global_step)])
        duration = time.time() - start_time

        if step % 5 == 0:  #Display progress
            print(
                '%s: step %d out of %d, loss = %.5f (%.1f examples/sec; %.3f sec/batch)  --- control value is %.12f'
                % (datetime.now(), step_b, max_step - step + step_b,
                   loss_value, stream_input.batch_size / duration,
                   float(duration), control_value))

        if step % 1000 == 0 and step != 0:  #Save model
            save_model(model, sess, log_folder, step_b)

        if valid and step % 5000 == 0:  #Validation
            print('------------------------------------------------------')
            print('Doing validation ...')
            print('------------------------------------------------------ \n')

            loss_tot = 0
            num_iter = int(stream_input_aux.size_epoch /
                           stream_input_aux.batch_size)
            counter = np.zeros((256, 3))

            for step1 in range(num_iter):
                sys.stdout.write('%d out of %d    \r' % (step1, num_iter))
                sys.stdout.flush()
                result_ret, adaptive_precision_ret, adaptive_recall_ret, adaptive_f_measure_ret, loss_value = sess.run(
                    [
                        result, adaptive_precision, adaptive_recall,
                        adaptive_f_measure, loss_aux
                    ])
                loss_tot += loss_value
                loss_mean = loss_tot / (step1 + 1)
                for i in range(255):
                    for j in range(2):
                        counter[i, j] += result_ret[i, j]
                counter[255, 0] += adaptive_precision_ret
                counter[255, 1] += adaptive_recall_ret
                counter[255, 2] += adaptive_f_measure_ret
            file = open(
                log_folder + '/' + model.name + '_validation_log/' +
                str(step_b) + ".txt", 'w')
            file.write('model name is ' + model.name + '\n')
            file.write('number trained step is ' + str(step_b) + '\n')
            file.write('aux dataset is ' + str(dataset) + '\n')
            file.write('loss mean is ' + str(loss_mean) + '\n')
            file.write('split of dataset is valid\n')
            for i in range(256):
                precision = counter[i, 0] / (num_iter *
                                             stream_input_aux.batch_size)
                recall = counter[i,
                                 1] / (num_iter * stream_input_aux.batch_size)
                file.write(
                    'Precision %0.02f percent -- Recall %0.02f percent\n' %
                    (precision * 100, recall * 100))
                if i == 255:
                    f = counter[i,
                                2] / (num_iter * stream_input_aux.batch_size)
                    file.write('fscore %0.04f\n' % (f))
                if i % 20 == 0:
                    print('Precision %0.02f percent -- Recall %0.02f percent' %
                          (precision * 100, recall * 100))
            file.close()
            print('\n------------------------------------------------------')
            print('Done!')
            print('------------------------------------------------------ \n')

    save_model(model, sess, log_folder, step_b)  #Final save
    print('------------------------------------------------------')
    print('Save done!')
    if save_copy:
        save_weight_only(model, sess, log_folder, step_b)  #Final save
        print('Weights-only save done!')
    print('------------------------------------------------------ \n')
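The validation graph above accumulates 255 thresholded precision/recall pairs plus an adaptive-threshold F-measure; the same bookkeeping is easier to follow in NumPy. A sketch assuming predictions in [0, 1] and binary {0, 1} labels, with beta^2 = 0.3 as in the graph above (function names are assumptions):

import numpy as np

def precision_recall_at(pred, label, thres):
    """Summed per-image precision/recall for saliency maps thresholded at thres (0-255 scale)."""
    predicted = (pred * 255 > thres).astype(np.float32)
    tp = np.sum(predicted * label, axis=(1, 2))
    fp = np.sum(predicted * (1 - label), axis=(1, 2))
    fn = np.sum((1 - predicted) * label, axis=(1, 2))
    precision = np.sum(tp / (1e-8 + tp + fp))
    recall = np.sum(tp / (1e-8 + tp + fn))
    return precision, recall

def weighted_f_measure(precision, recall, beta_sq=0.3):
    # Weighted F-measure, matching the 1.3 * P * R / (0.3 * P + R) expression above.
    return (1 + beta_sq) * precision * recall / (1e-8 + beta_sq * precision + recall)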
def compute_score(model,
                  sess,
                  stream_input,
                  restore_path,
                  dataset,
                  split,
                  write=False,
                  save=False):
    """
  Compute the precision/recall score of a given model, together with the F-measure at an adaptive threshold.

  Args:
    model            :         model to compute the score of
    sess             :         tensorflow session
    stream_input     :         data manager
    restore_path     :         where is the restore file
    dataset          :         dataset tested
    split            :         which split (valid, test, etc.)
    write            :         whether to write the result to a file
    save             :         whether to save the resulting saliency maps
  """

    print('------------------------------------------------------')
    print('Computing score of the model on %s from %s ...' % (dataset, split))
    print('Write result file : %r -- Save images : %r' % (write, save))
    print('------------------------------------------------------\n')

    images, labels, names = stream_input.get_inputs()
    guess, _, _ = model.infer(images)

    zeros = tf.zeros_like(labels)
    ones = tf.ones_like(labels)
    threshold = [i for i in range(255)]
    liste = []
    for t in threshold:
        predicted_class = tf.select(guess * 255 > t, ones, zeros)
        true_positive = tf.reduce_sum(
            tf.cast(
                tf.logical_and(tf.equal(predicted_class, ones),
                               tf.equal(labels, ones)), tf.float32), [1, 2])
        false_positive = tf.reduce_sum(
            tf.cast(
                tf.logical_and(tf.equal(predicted_class, ones),
                               tf.equal(labels, zeros)), tf.float32), [1, 2])
        true_negative = tf.reduce_sum(
            tf.cast(
                tf.logical_and(tf.equal(predicted_class, zeros),
                               tf.equal(labels, zeros)), tf.float32), [1, 2])
        false_negative = tf.reduce_sum(
            tf.cast(
                tf.logical_and(tf.equal(predicted_class, zeros),
                               tf.equal(labels, ones)), tf.float32), [1, 2])
        precision = tf.reduce_sum(true_positive /
                                  (1e-8 + true_positive + false_positive))
        recall = tf.reduce_sum(true_positive /
                               (1e-8 + true_positive + false_negative))
        liste.append(tf.pack([precision, recall]))
    result = tf.pack(liste)

    adaptive_threshold = 2 * tf.reduce_mean(guess, [1, 2], keep_dims=True)
    adaptive_output = tf.select(guess > adaptive_threshold, ones, zeros)
    adaptive_true_positive = tf.reduce_sum(
        tf.cast(
            tf.logical_and(tf.equal(adaptive_output, ones),
                           tf.equal(labels, ones)), tf.float32), [1, 2])
    adaptive_false_positive = tf.reduce_sum(
        tf.cast(
            tf.logical_and(tf.equal(adaptive_output, ones),
                           tf.equal(labels, zeros)), tf.float32), [1, 2])
    adaptive_true_negative = tf.reduce_sum(
        tf.cast(
            tf.logical_and(tf.equal(adaptive_output, zeros),
                           tf.equal(labels, zeros)), tf.float32), [1, 2])
    adaptive_false_negative = tf.reduce_sum(
        tf.cast(
            tf.logical_and(tf.equal(adaptive_output, zeros),
                           tf.equal(labels, ones)), tf.float32), [1, 2])
    adaptive_precision = tf.reduce_sum(
        adaptive_true_positive /
        (1e-8 + adaptive_true_positive + adaptive_false_positive))
    adaptive_recall = tf.reduce_sum(
        adaptive_true_positive /
        (1e-8 + adaptive_true_positive + adaptive_false_negative))
    adaptive_f_measure = tf.reduce_sum(
        1.3 * adaptive_precision * adaptive_recall /
        (1e-8 + 0.3 * adaptive_precision + adaptive_recall))

    # Initialisation of weights
    print('------------------------------------------------------')
    sess.run(tf.global_variables_initializer())
    print('Restoring from previous checkpoint...')
    ret_bis = restore_model(model, sess, restore_path)
    tf.train.start_queue_runners(sess=sess)
    stream_input.start_threads(sess)
    print('------------------------------------------------------')
    print('Done! Training ended at step %s' % (ret_bis))
    print('------------------------------------------------------ \n')

    if save:  #Save result images
        path = restore_path + '/' + model.name
        path += '/result_' + dataset + '/'
        if tf.gfile.Exists(path):
            tf.gfile.DeleteRecursively(path)
        tf.gfile.MakeDirs(path)

    num_iter = int(stream_input.size_epoch / stream_input.batch_size)
    counter = np.zeros((256, 3))

    print('------------------------------------------------------')
    for step in range(num_iter):  #Compute score

        sys.stdout.write('%d out of %d    \r' % (step, num_iter))
        sys.stdout.flush()

        result_ret, adaptive_precision_ret, adaptive_recall_ret, adaptive_f_measure_ret, names_ret, images_ret, labels_ret, guess_ret = sess.run(
            [
                result, adaptive_precision, adaptive_recall,
                adaptive_f_measure, names, images, labels, guess
            ])

        for i in range(stream_input.batch_size):

            if save:  #Save result images
                ret_path = path + names_ret[i]
                ret = np.asarray(images_ret[i] * 255, dtype="int8")
                Image.fromarray(ret,
                                'RGB').save(ret_path + '_' + 'im' + '.png')
                ret = np.asarray(labels_ret[i] * 255, dtype="int8")
                Image.fromarray(ret, 'P').save(ret_path + '_' + 'lab' + '.png')
                ret = np.asarray(guess_ret[i] * 255, dtype="int8")
                Image.fromarray(ret,
                                'P').save(ret_path + '_' + 'guess' + '.png')

        for i in range(255):
            for j in range(2):
                counter[i, j] += result_ret[i, j]
        counter[255, 0] += adaptive_precision_ret
        counter[255, 1] += adaptive_recall_ret
        counter[255, 2] += adaptive_f_measure_ret
    print('------------------------------------------------------ \n')

    for i in range(256):
        if i == 255:
            print('\n------------------------------------------------------')
        precision = counter[i, 0] / (num_iter * stream_input.batch_size)
        recall = counter[i, 1] / (num_iter * stream_input.batch_size)
        print('Precision %0.02f percent -- Recall %0.02f percent' %
              (precision * 100, recall * 100))
        if i == 255:
            print('fscore %0.04f' % (counter[i, 2] /
                                     (num_iter * stream_input.batch_size)))
            print('------------------------------------------------------ \n')

    if write:  #Save score
        file = open(restore_path + '/' + model.name + "/" + dataset + ".txt",
                    'w')
        file.write('model name is ' + model.name + '\n')
        file.write('number trained step is ' + str(ret_bis) + '\n')
        file.write('test dataset is ' + str(dataset) + '\n')
        file.write('split of dataset is ' + str(split) + '\n')
        for i in range(255):
            file.write(
                'Precision %0.02f percent -- Recall %0.02f percent\n' %
                (counter[i, 0] /
                 (num_iter * stream_input.batch_size) * 100, counter[i, 1] /
                 (num_iter * stream_input.batch_size) * 100))
        file.write(
            'Precision %0.02f percent -- Recall %0.02f percent\n' %
            (counter[255, 0] /
             (num_iter * stream_input.batch_size) * 100, counter[255, 1] /
             (num_iter * stream_input.batch_size) * 100))
        file.write('fscore %0.04f\n' % (counter[255, 2] /
                                        (num_iter * stream_input.batch_size)))
        file.close()
        print('------------------------------------------------------')
        print('Log file written')
        print('------------------------------------------------------ \n')