Example #1
def get_midi_from_midi(data):
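    # Wrap the incoming MIDI bytes in an in-memory buffer, run the checkpointed
    # model on it, and return the rewound output buffer.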
    save_path = BytesIO(data)
    save_path_with_left_right = BytesIO()
    inference("example/checkpoint.pth", save_path, save_path_with_left_right)
    save_path_with_left_right.seek(0)

    return save_path_with_left_right
Example #2
def evaluate(model_name, weight_file, image_size, nb_classes, batch_size,
             val_file_path, data_dir, label_dir):
    current_dir = os.path.dirname(os.path.realpath(__file__))
    save_dir = os.path.join(current_dir, 'Models/' + model_name + '/res/')
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    fp = open(val_file_path)
    image_list = fp.readlines()
    fp.close()

    start_time = time.time()
    inference(model_name,
              weight_file,
              image_size,
              image_list,
              data_dir,
              label_dir,
              return_results=False,
              save_dir=save_dir)
    duration = time.time() - start_time
    print('{}s used to make predictions.\n'.format(duration))

    start_time = time.time()
    conf_m, IOU, meanIOU = calculate_iou(model_name, nb_classes, save_dir,
                                         label_dir, image_list)
    print('IOU: ')
    print(IOU)
    print('meanIOU: %f' % meanIOU)
    print('pixel acc: %f' % (np.sum(np.diag(conf_m)) / np.sum(conf_m)))
    duration = time.time() - start_time
    print('{}s used to calculate IOU.\n'.format(duration))
Example #3
    def testAttentionModelWithInferIndices(self):
        hparams = common_test_utils.create_test_hparams(
            encoder_type="uni",
            num_layers=1,
            attention="scaled_luong",
            attention_architecture="standard",
            use_residual=False,
            inference_indices=[1, 2])
        # TODO(rzhao): Make infer indices support batch_size > 1.
        hparams.infer_batch_size = 1
        vocab_prefix = "nmt/testdata/test_infer_vocab"
        hparams.add_hparam("src_vocab_file", vocab_prefix + "." + hparams.src)
        hparams.add_hparam("tgt_vocab_file", vocab_prefix + "." + hparams.tgt)

        infer_file = "nmt/testdata/test_infer_file"
        out_dir = os.path.join(tf.test.get_temp_dir(),
                               "attention_infer_with_indices")
        hparams.add_hparam("out_dir", out_dir)
        os.makedirs(out_dir)
        output_infer = os.path.join(out_dir, "output_infer")
        ckpt = self._createTestInferCheckpoint(hparams, out_dir)
        inference.inference(ckpt, infer_file, output_infer, hparams)
        with open(output_infer) as f:
            self.assertEqual(2, len(list(f)))
        self.assertTrue(os.path.exists(output_infer + str(1) + ".png"))
        self.assertTrue(os.path.exists(output_infer + str(2) + ".png"))
Example #4
def main(config, resume):
    train_logger = Logger()

    print('Create train loader')
    # train_data_loader = BoxesDataLoader(config, name='train')

    model = NatashaDetection(config)
    model.summary()

    loss = eval(config['loss'])

    print('Create trainer')
    # trainer = Trainer(model, loss,
    #                   resume=resume,
    #                   config=config,
    #                   data_loader=train_data_loader,
    #                   train_logger=train_logger)

    print('Start training')
    # trainer.train()

    print('Create test loader')
    test_data_loader = BoxesDataLoader(config, name='test')
    model.eval()
    print('Do inference')
    inference(test_data_loader, model)
Example #5
def lstm_model(X, y, is_training):
    # Use a four-layer FIR network structure.
    inputs = tf.reshape(X, [tf.shape(X)[0], tf.shape(X)[2]])
    layer2_input_ = inference.inference('layer1', inputs,
                                        constant.LAYER1_PART_NODE, constant.LAYER2_PART_NODE,
                                        constant.LAYER1_PART_NUMBER, constant.LAYER2_PART_NUMBER,
                                        constant.LAYER2_TIME_DELAY, None)
    layer2_input = tf.nn.relu(layer2_input_)

    layer3_input_ = inference.inference('layer2', layer2_input,
                                        constant.LAYER2_PART_NODE, constant.LAYER3_PART_NODE,
                                        constant.LAYER2_PART_NUMBER, constant.LAYER3_PART_NUMBER,
                                        constant.LAYER3_TIME_DELAY, None)
    layer3_input = tf.nn.relu(layer3_input_)

    out = inference.inference('layer3', layer3_input,
                              constant.LAYER3_PART_NODE, constant.OUTPUT_NODE,
                              constant.LAYER3_PART_NUMBER, constant.OUTPUT_PART_NUMBER,
                              constant.LAYER4_TIME_DELAY, None)

    # Compute the loss and training op only during training; at test time
    # return just the predictions.
    if not is_training:
        return out, None, None

    # Compute the loss.
    loss = tf.losses.mean_squared_error(labels=y, predictions=out)

    # Create the optimizer and obtain the training op.
    train_op = tf.contrib.layers.optimize_loss(loss,
                                               tf.train.get_global_step(),
                                               optimizer="Adagrad",
                                               learning_rate=0.1)
    return out, loss, train_op
Example #6
def evaluate(model_name, weight_file, image_size, nb_classes, batch_size, val_file_path, data_dir, label_dir,
          label_suffix='.png',
          data_suffix='.jpg'):
    current_dir = os.path.dirname(os.path.realpath(__file__))
    save_dir = os.path.join(current_dir, 'Models/'+model_name+'/res/')
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    fp = open(val_file_path)
    image_list = fp.readlines()
    fp.close()

    start_time = time.time()
    inference(model_name, weight_file, image_size, image_list, data_dir, label_dir, return_results=False, save_dir=save_dir,
              label_suffix=label_suffix, data_suffix=data_suffix)
    duration = time.time() - start_time
    print('{}s used to make predictions.\n'.format(duration))

    start_time = time.time()
    conf_m, IOU, meanIOU = calculate_iou(model_name, nb_classes, save_dir, label_dir, image_list)
    print('IOU: ')
    print(IOU)
    print('meanIOU: %f' % meanIOU)
    print('pixel acc: %f' % (np.sum(np.diag(conf_m))/np.sum(conf_m)))
    duration = time.time() - start_time
    print('{}s used to calculate IOU.\n'.format(duration))
Example #7
def evaluate(net, args):
    train_file_path, val_file_path, data_dir, label_dir = get_dataset_path(
        args.dataset)
    classes = get_dataset_classes(args.dataset)
    image_suffix, label_suffix = get_dataset_suffix(args.dataset)

    current_dir = os.path.dirname(os.path.realpath(__file__))
    save_dir = 'results/%s/' % args.dataset
    save_dir += '%s/res/' % args.model_name
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    fp = open(val_file_path)
    image_list = fp.readlines()
    fp.close()
    image_list = [img_name.strip('\n') for img_name in image_list]

    start_time = time.time()
    inference(net,
              args,
              image_list,
              flip=args.flip,
              return_results=False,
              save_dir=save_dir)
    duration = time.time() - start_time
    print('{}s used to make predictions.\n'.format(duration))

    start_time = time.time()
    conf_m, IOU, meanIOU = calculate_iou(args, save_dir, image_list)
    print('IOU: ')
    print(IOU)
    print('meanIOU: %f' % meanIOU)
    print('pixel acc: %f' % (np.sum(np.diag(conf_m)) / np.sum(conf_m)))
    duration = time.time() - start_time
    print('{}s used to calculate IOU.\n'.format(duration))
Example #8
def main():
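    # Load the saved linear-regression parameters from model.npz and run
    # inference using the paths supplied on the command line.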
    model_data = numpy.load('model.npz')
    model = Linear_Regression()
    model.b = model_data['b']
    model.W = model_data['w']
    feature_hours = model_data['feature_hours']

    inference(model, feature_hours, sys.argv[1], sys.argv[2])
Example #9
def main(args):
    if args.pos1 == 'train':
        train_dataset = myDataset(os.path.join(args.train_dir, 'feature'))
        #prepare dataloader
        train_data_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=4,
            collate_fn=myDataset.get_collate_fn(args.prediction_num,
                                                args.neg_num,
                                                args.reduce_times))
        saver = pytorch_saver(10, args.save_dir)
        #build model
        model = CPC(args.input_dim,
                    args.feat_dim,
                    reduce_times=args.reduce_times,
                    prediction_num=args.prediction_num)
        if args.resume_dir != '':
            print('loading model')
            model.load_state_dict(
                pytorch_saver.load_dir(args.resume_dir)['state_dict'])

        model.train()
        model.cuda()
        args.log = os.path.join(args.save_dir, args.log)
        train(model, train_data_loader, saver, args.epochs, args.learning_rate,
              args.log)

    else:
        test_dataset = myDataset(os.path.join(args.test_dir, 'feature'),
                                 os.path.join(args.test_dir, 'phn_align.pkl'))
        test_data_loader = torch.utils.data.DataLoader(
            test_dataset,
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=4,
            collate_fn=myDataset.get_collate_fn(args.prediction_num,
                                                args.neg_num,
                                                args.reduce_times,
                                                train=False))
        if args.resume_dir == '':
            print("resume should exist in inference mode", file=sys.stderr)
            sys.exit(-1)
        else:
            model = CPC(args.input_dim,
                        args.feat_dim,
                        reduce_times=args.reduce_times,
                        prediction_num=args.prediction_num)
            print('loading model')
            model.load_state_dict(
                pytorch_saver.load_dir(args.resume_dir)['state_dict'])
            model.eval()
            model.cuda()

            inference(model, test_data_loader, args.result_dir,
                      args.reduce_times)
Example #10
def main(config, resume):
    train_logger = Logger()

    data_loader = ProteinDataLoader(config,
                                    name='train',
                                    shuffle=True,
                                    evaluation=False)
    valid_data_loader = None  # data_loader.split_validation()

    if False:  # config['similarity_approach']:
        assert config['sampling'] == 'uniform'
        assert config['loss'] == 'histogram_loss'
        model = RetrievalModel(config, data_loader).cuda()
    else:
        model = NatashaProtein(config=config).cuda()

    loss = eval(config['loss'])
    metrics = [eval(metric) for metric in config['metrics']]

    if True:
        print('start training')
        if False:  # config['similarity_approach']:
            trainer = SimilarityTrainer(model,
                                        loss,
                                        metrics,
                                        resume=resume,
                                        config=config,
                                        data_loader=data_loader,
                                        valid_data_loader=valid_data_loader,
                                        train_logger=train_logger)
        else:
            trainer = Trainer(model,
                              loss,
                              metrics,
                              resume=resume,
                              config=config,
                              data_loader=data_loader,
                              valid_data_loader=valid_data_loader,
                              train_logger=train_logger)
        print('batch_size ', config['data_loader']['batch_size_train'])
        trainer.train()

    print('Create test loader')
    test_data_loader = ProteinDataLoader(config, name='test')
    checkpoint_for_model = torch.load('saved/Protein/model_best.pth.tar')
    # checkpoint_for_model = torch.load('saved/Protein/model_best-resnent-50-lb-0.383.pth.tar')
    # checkpoint_for_model = torch.load('saved/NatashaSegmentation/model_best-resnet-34-wo-depth-bce-0.0063.pth.tar')
    model.load_state_dict(checkpoint_for_model['state_dict'])
    # print(model.state_dict())
    model.eval()

    print('Do inference')

    if False:  # config['similarity_approach']:
        inference_for_knn(config)
    else:
        inference(test_data_loader, model)
Example #11
def vanilla_validation(q_dataloader, q_txt, g_dataloader, g_txt, base_net,
                       rank_size):
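    # Extract feature embeddings for the query and gallery sets, then score
    # retrieval quality; the rank-1 CMC value and mAP are returned.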
    q_features = inference.inference(q_dataloader, base_net)
    g_features = inference.inference(g_dataloader, base_net)
    CMC, mAP = Vanilla_Cmc(q_features,
                           q_txt,
                           g_features,
                           g_txt,
                           rank_size=rank_size)
    return CMC[0], mAP
Example #12
def main():
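    # Read the game state from argv, score every candidate move with the
    # win-estimate model, pick the highest-valued one, attach any noble the
    # player now qualifies for, and print the chosen move as JSON.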

    envir = json.loads(sys.argv[1])
    #with open('input.json') as envir_input:
    #envir=json.load(envir_input)
    #envir_input.close()

    player_now, pot_oper, gem_num, gold_num, gem_of_player, card_of_player = trans_envir_now(
        envir)

    input_vec = envir2vec(envir)

    win_est_now = inference(input_vec)

    ope_value = []
    input_vec = []
    for i in range(len(pot_oper)):
        envir_new = change_envir(pot_oper[i], player_now, envir)
        input_vec.append(envir2vec(envir_new))
    win_est_new = inference(input_vec)
    for i in range(len(pot_oper)):
        ope_value.append(oper_value(player_now, win_est_new[i], win_est_now))

    opt_ope = 0
    max_value = ope_value[0]
    for i in range(len(ope_value)):
        if max_value < ope_value[i]:
            max_value = ope_value[i]
            opt_ope = i
    opt_oper = pot_oper[opt_ope]

    envir_new = change_envir(opt_oper, player_now, envir)
    dividend_of_player = {
        'red': 0,
        'green': 0,
        'blue': 0,
        'black': 0,
        'white': 0
    }
    if "purchased_cards" in envir_new['players'][player_now].keys():
        for card in envir_new['players'][player_now]["purchased_cards"]:
            dividend_of_player[card['color']] += 1

    if 'nobles' in envir_new.keys():
        for noble in envir_new['nobles']:
            if_ava = True
            for gem in noble["requirements"]:
                if dividend_of_player[gem['color']] < gem['count']:
                    if_ava = False
            if if_ava:
                opt_oper['noble'] = noble
                break
    print(json.dumps(opt_oper))
Example #13
 def testAttentionModel(self):
     hparams = common_test_utils.create_test_hparams(
         encoder_type="uni",
         num_layers=1,
         attention="scaled_luong",
         attention_architecture="standard",
         use_residual=False,
     )
     ckpt_path = self._createTestInferCheckpoint(hparams, "attention_infer")
     infer_file = "nmt/testdata/test_infer_file"
     output_infer = os.path.join(hparams.out_dir, "output_infer")
     inference.inference(ckpt_path, infer_file, output_infer, hparams)
     with open(output_infer) as f:
         self.assertEqual(5, len(list(f)))
Example #14
def run(label, features, target):
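    # Train a linear-regression model, report the final training loss, and run
    # inference on the test set; feature_selection keeps only columns 9::18
    # (the PM2.5 readings) when label is 'pm25_only'.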
    model = Linear_Regression()

    loss_data = model.train(features, target, epochs=10000)
    print('label={}, loss={}'.format(label, loss_data[-1]))

    def feature_selection(X_test):
        if label != 'pm25_only':
            return X_test

        return X_test[..., 9::18]

    inference(model, feature_hours, './data/test.csv',
              f'./submission_q2_{label}.txt', feature_selection)
Example #15
 def testBasicModelWithInferIndices(self):
     hparams = common_test_utils.create_test_hparams(
         encoder_type="uni",
         num_layers=1,
         attention="",
         attention_architecture="",
         use_residual=False,
         inference_indices=[0])
     ckpt_path = self._createTestInferCheckpoint(
         hparams, "basic_infer_with_indices")
     infer_file = "nmt/testdata/test_infer_file"
     output_infer = os.path.join(hparams.out_dir, "output_infer")
     inference.inference(ckpt_path, infer_file, output_infer, hparams)
     with open(output_infer) as f:
         self.assertEqual(1, len(list(f)))
Example #16
def evaluate(x_test, y_test):
    with tf.Graph().as_default() as g:
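        # Evaluation graph: placeholders for a fixed batch of 100 images, the
        # inference network, an accuracy op, and a saver that restores the
        # exponential-moving-average shadow variables.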
        x = tf.placeholder(tf.float32, [100, 28, 28, 1], name='x-test')
        y = tf.placeholder(tf.int32, [100], name='y-test')

        y_hat = inference.inference(x, regularizer=None, training=False)

        correct = tf.equal(y, tf.cast(tf.argmax(y_hat, 1), tf.int32))
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

        var_avg = tf.train.ExponentialMovingAverage(train.moving_avg_decay)
        var_to_restore = var_avg.variables_to_restore()
        saver = tf.train.Saver(var_to_restore)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(train.model_path)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    acc = sess.run(accuracy, feed_dict={x: x_test, y: y_test})
                    cur_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                        '-')[-1]
                    print('After {} step(s), acc = {}'.format(cur_step, acc))
                    if int(cur_step) >= train.max_step:
                        return
                else:
                    print("No checkpoint file found")
                    return
            time.sleep(time_delay)
Example #17
def predict(labels):
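    # Restore the latest checkpoint, feed the image returned by read_pic()
    # through the inference graph, and print the predicted label.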
    x = tf.placeholder(
        tf.float32,
        [None,
        IMAGE_SIZE,
        IMAGE_SIZE,
        NUM_CHANNELS]
    )

    y = inference.inference(x, False, None)

    prediction_index = tf.argmax(y, 1)[0]
    
    saver = tf.train.Saver()
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(
            MODEL_PATH
        )
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(
                sess, ckpt.model_checkpoint_path
            )
        img_data = read_pic().eval()
        # test input image
        print(np.shape(img_data))
        plt.imshow(np.reshape(img_data,
            [IMAGE_SIZE, IMAGE_SIZE]))
        plt.show()
        # end test

        prediction_index_value = sess.run(prediction_index, {x: img_data})
        print(labels[prediction_index_value])
Example #18
def evaluate(mnist):

    x = tf.placeholder(tf.float32, [None, train.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, train.OUTPUT_NODE], name='y-input')
    validate_x, validate_y = mnist.test.images, mnist.test.labels

    y = inference.inference(x, keep_prob=1)
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    saver = tf.train.Saver()

    # while True:
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(train.MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            global_step = ckpt.model_checkpoint_path.split("/")[-1].split(
                "-")[-1]
            accuracy_score = sess.run(accuracy,
                                      feed_dict={
                                          x: validate_x,
                                          y_: validate_y
                                      })
            print("After %s training step, validation accuracy=%g" %
                  (global_step, accuracy_score))
        else:
            print("No checkpoint file found")
            return
Example #19
def prediction(dataset):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(
            tf.float32, [None, inference.INPUT_NODE], name='x-input')

        y = inference.inference(x, None)
        prediction = tf.cast(tf.sigmoid(y) > 0.5, tf.int32)
        # probability = tf.sigmoid(y)

        prediction_feed = {x: dataset['test_x']}
        variable_averages = tf.train.ExponentialMovingAverage(
            train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(train.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                submit = pd.concat(
                    [
                        dataset['test_y'],
                        pd.DataFrame(
                            sess.run(prediction, feed_dict=prediction_feed),
                            columns=['Survived'])
                    ],
                    axis=1)
                submit.to_csv('../output/submit.csv', index=False)
                print('----------------------------------')
                print('Predict and output successfully...')
                print('----------------------------------')
            else:
                print('---------------------------')
                print('No checkpoint file found...')
                print('---------------------------')
Example #20
def evaluate(mnist):
    # Define the input and output placeholders
    x = tf.placeholder(tf.float32, [None, inference.INPUT_NODE],
                       name='x-input')
    y_ = tf.placeholder(tf.float32, [None, inference.OUTPUT_NODE],
                        name='y-input')
    validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

    # The second argument is the regularization loss (None here)
    y = inference.inference(x, None)

    # Compute the accuracy
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Restore the moving-average (shadow) versions of the variables
    variable_averages = tf.train.ExponentialMovingAverage(
        train.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    while True:
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(train.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                print("After %s training step(s), valiadation accuracy = %g" %
                      (global_step, accuracy_score))
            else:
                print('No checkpoint file found')
                return
            time.sleep(EVAL_INTERVAL_SECS)
Example #21
def main():
    numpy.set_printoptions(threshold=numpy.nan)

    feature_hours = 1

    data = read_data()
    features = extract_features(data, feature_hours)
    target = extract_target(data, feature_hours, pm25_row=9)

    for regularization_term in (0, 0.01, 0.1, 10, 100, 1000):
        model = Linear_Regression(regularization_term)

        loss_data = model.train(features, target, epochs=10000)
        print('lambda={}, loss={}'.format(regularization_term, loss_data[-1]))
        inference(model, feature_hours, './data/test.csv',
                  f'./submission_q3_{regularization_term}.txt')
Example #22
def evaluate():
    logger = process.get_logger()

    with tf.Graph().as_default() as g:

        x = tf.placeholder(
            tf.int32,
            [None, None],
            name="x-input"
        )
        
        y = inference.inference(x, process.get_lex_len(), None)

        tf.argmax(y, 1)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(
                const.MODEL_DIR)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                global_step = ckpt.model_checkpoint_path\
                                    .split("/")[-1].split("-")[-1]
                feed_x = process.get_vec_from_text(const.PREDICT_TEXT)
                # feed_x = feed_x.any()
                y = sess.run(y, feed_dict={x: feed_x})
                print("After %s training step(s), text: '%s', "
                        "prediction: %s" % (global_step, const.PREDICT_TEXT, str(y)))
                logger.info("After %s training step(s), text: '%s', "
                    "prediction = %s" % (global_step, const.PREDICT_TEXT, str(y)))
            else:
                print("No checkpoint file found")
                return
Example #23
def train():
    x = tf.placeholder(
        tf.float32, [None, IMG_SIZE, IMG_SIZE, 3], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, 2], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARAZATION_RATE)
    # With train=True, dropout is enabled.
    # Passing a regularizer adds the L2 penalties on the weights to the 'losses' collection.
    y = infer.inference(x, train=True, regularizer=regularizer)
    global_step = tf.Variable(0, trainable=False)
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    # Accuracy
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # Cross-entropy
    '''cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,
                                                                   labels=tf.arg_max(y_, 1)'''
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=y,
                                                                   labels=y_)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # Total loss: cross-entropy plus the regularization losses
    losses = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    # Exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step,
                                               DECAY_STEPS, LEARNING_RATE_DECAY)
    train_step = tf.train.AdamOptimizer(
        learning_rate).minimize(losses, global_step=global_step)
    # Group the optimizer step and the moving-average update into one training op
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    # Create the saver for checkpointing
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        outcome = {}  # dict of results, later saved to a CSV file
        for i in range(TRAINING_STEPS):
            xs, ys = next_batch(BATCH_SIZE, train_x, train_y)
            _, loss_value, step = sess.run([train_op, losses, global_step],
                                           feed_dict={x: xs, y_: ys})
            if i % 300 == 0:

                accuracy_train = sess.run(accuracy,
                                          feed_dict={x: xs, y_: ys})
                accuracy_test = sess.run(accuracy,
                                         feed_dict={x: test_x, y_: test_y})
                outcome['train'+str(step)] = accuracy_train
                outcome['test'+str(step)] = accuracy_test
                save_parameters_as_csv(outcome, "./paras.csv")
                print("After %d steps, accuracy on train set is %g" %
                      (step, accuracy_train))
                print("After %d steps, accuracy on test set is %g" %
                      (step, accuracy_test))
                print("After %d steps, loss on training batch is %g" %
                      (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
        sys.exit()
Example #24
def evaluate(mnist):  # Is it necessary to load the graph here?
    x = tf.placeholder(tf.float32, shape=[None, inference.INPUT_NODE], name='x_input')
    y_ = tf.placeholder(tf.float32, shape=[None, inference.OUTPUT_NODE], name='y_output')
    validation_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

    y = inference.inference(x, None)
    correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(y, 1))  # the second argument is effectively the moving average of y, since y is restored from the shadow variables
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Map the shadow (moving-average) variables back to the original names
    ema = tf.train.ExponentialMovingAverage(train.MOVING_AVERAGE_DECAY)
    saver = tf.train.Saver(ema.variables_to_restore())
    # saver = tf.train.Saver()
    # saver = tf.train.Saver({'layer2/add/ExponentialMovingAverage': y})
    while True:
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(train.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                accuracy_value, y1 = sess.run([accuracy, y], feed_dict=validation_feed)
                print('y: ', y)
                print('y = ', y1)
                print('After %s training steps, validation accuracy is %f' % (global_step, accuracy_value))
            else:
                print('There is no checkpoint file!')
                return
            time.sleep(EVAL_INTERVAL_SECS)
Example #25
def evaluate(test_xs, test_ys):
    x = tf.placeholder(
        tf.float32,
        [None, const.IMAGE_HEIGHT, const.IMAGE_WIDTH, const.NUM_CHANNELS],
        "x-input")
    y_ = tf.placeholder(tf.float32, [None, inference.NUM_LABELS], "y_-input")
    global_step = tf.Variable(0, False)

    y = inference.inference(x, False, None)

    reshaped_y = tf.reshape(y, [-1, const.MAX_CAPTCHA, const.CHAR_SET_LEN])
    reshaped_y_ = tf.reshape(y_, [-1, const.MAX_CAPTCHA, const.CHAR_SET_LEN])
    max_idx_p = tf.argmax(reshaped_y, 2)
    max_idx_l = tf.argmax(reshaped_y_, 2)

    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(max_idx_l, max_idx_p), tf.float32))

    saver = tf.train.Saver()
    with tf.Session() as sess:

        tf.global_variables_initializer().run()
        ckpt = tf.train.get_checkpoint_state(const.MODEL_DIR)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            # global_step = int(ckpt.model_checkpoint_path.split('.')[-1].split('-')[-1])
        else:
            print('No checkpoint file found')
            sess.close()
            return

        acc_value = sess.run(accuracy, {x: test_xs, y_: test_ys})

        return (global_step.eval(), acc_value)
Example #26
def main(arg=None):
    images, labels = image_loader.read_batch()
    logits = inference.inference(images)
    loss = ls.loss(logits, labels)

    saver = tf.train.Saver()

    summary_opt = tf.summary.merge_all()

    init = tf.global_variables_initializer()

    sess = tf.InteractiveSession()

    sess.run(init)
    tf.train.start_queue_runners(sess=sess)

    summary_writer = tf.summary.FileWriter(flag.log_dir, graph=sess.graph)

    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    for i in xrange(5001):
        if i % 100 == 0:
            print 'step {0}, loss: {1}'.format(i, sess.run(ls.get_loss()))
        sess.run(train_step)
        if i % 50 == 0:
            summary_str = sess.run(summary_opt)
            summary_writer.add_summary(summary_str, i)

    saver.save(sess=sess, save_path=flag.save_dir)
    summary_writer.close()
Example #27
def run_classification(regions,bboxes,num_regions=1,class_thresh=0.5,num_scores=0):
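    # Score each candidate region with the classification network and keep only
    # detections whose best class score exceeds class_thresh.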
    tf.reset_default_graph()
    pic = tf.constant(regions,dtype=tf.float32)
    config = tf.ConfigProto(log_device_placement=False)
    config.gpu_options.per_process_gpu_memory_fraction = 0.7
    config.gpu_options.allow_growth = True
    score, variables = inference.inference(pic, False)
    obj=[]
    with tf.Session(config=config) as sess:
        #tf.summary.FileWriter('logs/',sess.graph)
        tf.local_variables_initializer().run()
        saver = tf.train.Saver(var_list=variables)
        tf.global_variables_initializer().run()
        try:
            saver.restore(sess, constants.ROOT +'classification.ckpt')
        except Exception as e:
            print(str(e))
            print('Cannot restore!')
        scores = sess.run(score)
        for i in range(num_regions):
            try:
                print('scores='+str(scores[i]))
                #print('answers='+str(lbl[i]))
                cls=np.argmax(scores[i])
                if scores[i,cls]<class_thresh: continue#obj.append([0,0.0]+bboxes[i][num_scores:])
                else: obj.append([cls+1,scores[i,cls]]+bboxes[i][num_scores:])
            except:
                break
    return obj
Example #28
def evaluate(x_dev, y_dev, iteration):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(
            tf.float32,
            [
                None,  # batch_size
                inference.MAX_LEN_DOC,  # maximum number of sentences per document
                inference.SENTENCE_LEN
            ],  # sentence2vec dimension, used as the channels axis
            name='x-input')
        y_ = tf.placeholder(tf.float32, [None, inference.MAX_LEN_DOC],
                            name='y-input')
        validate_feed = {x: x_dev, y_: y_dev}

        # Validation set, so no regularization
        y = inference.inference(x)
        prediction = tf.cast(y >= 0.5, tf.float32)
        correct_prediction = tf.equal(prediction, y_)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        saver = tf.train.Saver()
        with tf.Session() as sess:
            # tf.train.get_checkpoint_state automatically finds the latest model file in the directory via the checkpoint file
            ckpt = tf.train.get_checkpoint_state(train.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                # Load the model
                saver.restore(sess, ckpt.model_checkpoint_path)
                accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                print("After %s training step(s), validation accuracy = %g" %
                      (iteration, accuracy_score))
            else:
                print('No checkpoint file found')
                return
Example #29
def inference(base_path, config):
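    # Run the inference script, then mask each predicted object-coordinate map
    # with its (shrunk) segmentation image and write the result as a TIFF per
    # input image into the output directory.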
    results = inference_script.inference(base_path, config)

    # Path existence check during inference
    segmentation_images_path = config.SEGMENTATION_IMAGES_PATH
    output_path = os.path.join(base_path, config.OUTPUT_PATH)

    if os.path.exists(output_path):
        shutil.rmtree(output_path)

    os.makedirs(output_path)

    for i, result in enumerate(results):
        image_filename = result["image"]
        image_filename_without_extension = os.path.splitext(image_filename)[0]
        # TODO: only 0 as long as only one image is detected and images are not batched for inference
        prediction = result["prediction"]["obj_coords"]
        segmentation_image_filename = result["segmentation_image"]
        segmentation_image = cv2.imread(
            os.path.join(segmentation_images_path,
                         segmentation_image_filename))
        shrunk_segmentation_image = model_util.shrink_image_with_step_size(
            segmentation_image, prediction.shape)
        final_image = np.zeros(prediction.shape, dtype=np.float16)
        indices = np.where(
            shrunk_segmentation_image == config.SEGMENTATION_COLOR)
        final_image[indices] = prediction[indices]
        tiff.imsave(
            os.path.join(
                output_path,
                "{}_obj_coords.tiff".format(image_filename_without_extension)),
            final_image)
Example #30
def predict(mnist, labels):
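    # Restore the latest checkpoint, classify one randomly chosen validation
    # image, print the expected and predicted labels, and display the image.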
    x = tf.placeholder(tf.float32, [
        None, inference.IMAGE_SIZE, inference.IMAGE_SIZE,
        inference.NUM_CHANNELS
    ], "x-input")
    y_ = tf.placeholder(tf.int64, [None], "y_-input")
    y = inference.inference(x, False, None)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(train.MODEL_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        valid_num = np.random.randint(mnist.validation.num_examples)
        validate_image = mnist.validation.images[valid_num]
        validate_label = mnist.validation.labels[valid_num]
        reshaped_image = np.reshape(validate_image, [
            -1, inference.IMAGE_SIZE, inference.IMAGE_SIZE,
            inference.NUM_CHANNELS
        ])
        reshaped_label = np.reshape(validate_label, [-1])
        prediction_index = sess.run(tf.argmax(y, 1), {
            x: reshaped_image,
            y_: reshaped_label
        })

        print("correct answer: \t%s" % (labels[reshaped_label[0]]))
        print("prediction: \t%s" % (labels[prediction_index[0]]))

        image_forShow = np.reshape(
            validate_image, [inference.IMAGE_SIZE, inference.IMAGE_SIZE])
        plt.imshow(image_forShow)
        plt.show()
Example #31
    def test(self, x):
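        # Restore the saved input/output min-max normalization constants along
        # with the model weights, normalize x, run inference, and map the result
        # back to the original scale.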
        x_min = tf.Variable(np.zeros(x.shape[1]),
                            dtype=tf.float32,
                            name='x_min')
        x_max = tf.Variable(np.zeros(x.shape[1]),
                            dtype=tf.float32,
                            name='x_max')
        y_min = tf.Variable([0], dtype=tf.float32, name='y_min')
        y_max = tf.Variable([0], dtype=tf.float32, name='y_max')

        input_x = tf.placeholder(tf.float32, [None, self.n_in], name='x-input')
        y = inference.inference(input_x, None, self.n_in, self.n_out,
                                self.n_hidden)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state('ckpt')
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                x_min, x_max, y_min, y_max = sess.run(
                    [x_min, x_max, y_min, y_max])
                x = (x - x_min) / (x_max - x_min)
                result = sess.run(y, feed_dict={input_x: x})
                result = result * (y_max - y_min) + y_min
                print(result)
Example #32
def main(fileIndex,useTestData,getTrees,order = 1,language = english):
    fileIndex = str(fileIndex)
    inputFile = ""
    if language == english:
        if order == 1:
            inputFile += "./data/1stOrder/"
        elif order == 2:
            inputFile += "./data/2ndOrder/"
        if useTestData:
            inputFile += "test/"
        else:
            inputFile += "dev/"
    else:
        inputFile += "./data/" + language + str(order) + "/"
        
    inputFile += "output_" + fileIndex + ".txt"
    g = DiGraph(inputFile)
    
#     print "iter Num =", fileIndex, "n =", g.n
    
    optTree = [];
    goldTree = [];
    optHeads = g.optHeads
    goldHeads = g.goldHeads
    
    for i in range(0,g.n):
        v = i + 1
        goldu = int(goldHeads[i])
        optu  = int(optHeads[i])
        goldTree.append((goldu,v))
        optTree.append((optu,v))

    w = {}
    for arc in g.partsManager.getArcs():
        w[arc.u,arc.v] = arc.val
    
    inf                 = inference(w,g.n,g.partsManager);
#     t1                  = time.clock()
#     optGProj            = inf.eisnerProjective()
#     t2                  = time.clock()
#     optGNonProj         = inf.chuLiuEdmondsWrapper()
    t3                  = time.clock()
#     optGgreedyMinLoss   = inf.greedyMinLoss()
#     if fileIndex == '42':
#         print "0,2"
    optGgreedyMinLoss   = inf.greedyMinLossTake2(order)
    t4                  = time.clock()
#     optGtwoSidedMinLoss = inf.twoSidedMinLoss()
#     t5                  = time.clock()
    
    goldHeads           = map(lambda u: int(u), goldHeads)
    optHeads            = map(lambda u: int(u), optHeads)
    outHeads = {'projInference':[],'nonProj': [], 'minLoss': [],'2SidedMinLoss': []}
    out = {}
#     inferenceTypes = [(optGProj,'projInference',t2 - t1), (optGNonProj,'nonProj',t3 - t2), (optGgreedyMinLoss,'minLoss', t4 - t3)\
#                       , (optGtwoSidedMinLoss,'2SidedMinLoss',t5 - t4)]
    inferenceTypes = [(optGgreedyMinLoss,'minLoss', t4 - t3)]
    for (optG, keyName,t) in inferenceTypes: 
        if optG is None:
            out[keyName] = None
            continue
        optEdges            = optG.edges()
        nInfGoldCorrect     = 0
        nInfOptCorrect      = 0
        noptGoldCorrect     = 0
        for i in range(0,g.n):
            v           = i + 1
            goldu       = goldHeads[i]
            optu        = optHeads[i]
            optEdge     = filter(lambda (u_j,v_j): v_j == v, optEdges)
            infu        = optEdge[0][0]
            outHeads[keyName].append(infu)
            
            if infu == goldu:
                nInfGoldCorrect += 1 
            if infu == optu:
                nInfOptCorrect += 1
            if optu == goldu:
                noptGoldCorrect += 1
           
        out[keyName] = {'ninfGold': nInfGoldCorrect, 'ninfOpt': nInfOptCorrect, 'noptGold': noptGoldCorrect, \
                        'n': g.n, 'inferenceTime': t}
        if getTrees:
            out['goldHeads']        = goldHeads
            out['optHeads']         = optHeads
            out['projOptHeads']     = outHeads['projInference']
            out['nonProjOptHeads']  = outHeads['nonProj']
            out['greedyOptHeads']   = outHeads['minLoss']
    
    return out
Example #33
def train(mnist):
    # Build the model
    x = tf.placeholder(
        tf.float32, [
            BATCH_SIZE, inference.IMAGE_SIZE, inference.IMAGE_SIZE,
            inference.NUM_CHANNELS
        ],
        name='x-input')  # input layer
    y_ = tf.placeholder(
        tf.float32, [None, inference.OUTPUT_NODE], name='y-input')  # labels
    regularizer = tf.contrib.layers.l2_regularizer(
        REGULARIZATION_RATE)  # L2 regularization loss
    y = inference.inference(x, True, regularizer)  # output layer

    # Training-step counter, not trainable
    global_step = tf.Variable(0, trainable=False)

    # Set up moving averages
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)  # moving-average object
    variable_averages_op = variable_averages.apply(
        tf.trainable_variables())  # apply moving averages to all trainable variables

    # Exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY)

    # Minimize the loss
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))  # cross-entropy for each image
    cross_entropy_mean = tf.reduce_mean(cross_entropy)  # mean cross-entropy over the batch
    loss = cross_entropy_mean + tf.add_n(
        tf.get_collection('losses'))  # total loss = cross-entropy + regularization losses
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)  # optimize the loss

    # Apply backpropagation and the moving-average update together
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    # Create the saver for checkpointing
    saver = tf.train.Saver()

    # Start training
    with tf.Session() as sess:
        # Initialize all variables
        tf.global_variables_initializer().run()

        # Training loop
        for i in range(TRAINING_STEPS):
            # Fetch the batch for this step
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            xs = np.reshape(
                xs, (BATCH_SIZE, inference.IMAGE_SIZE, inference.IMAGE_SIZE,
                     inference.NUM_CHANNELS))  # reshape MNIST vectors to 4-D tensors
            _, loss_value, step = sess.run(
                [train_op, loss, global_step], feed_dict={
                    x: xs,
                    y_: ys
                })

            # Save the model every 1000 steps
            if i % 1000 == 0:
                # Report training progress
                print('After %d training steps, loss is %g.' % (step,
                                                                loss_value))

                # Save the current model
                saver.save(
                    sess,
                    os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                    global_step=global_step)
Example #34
def main(fileIndex,writeCsvFile,verbose,applyPositiveSlacks,order,useGoldHeads,useTestData,getTrees,alpha,proj):
    fileIndex = str(fileIndex)
#     inputFile = "./data/output_" + fileIndex + ".txt"
    inputFile = "./data/"
    if order == 3:
        inputFile += "3rdOrder/"
    elif order == 2:
        inputFile += "2ndOrder/"
    elif order == 1:
        inputFile += "1stOrder/"
    else:
        assert False, "wrong model order, should be 1,2 or 3 - given: '" + str(order) + "'"
    if useTestData:
        inputFile += "test/"
    else:
        inputFile += "dev/"
        
    inputFile += "output_" + fileIndex + ".txt"
    outputFileName = "./output/file_" + fileIndex + ".csv"
    g = DiGraph(inputFile)
    lpm = LPMaker(g, "try_input_" + fileIndex, "input_" + fileIndex + ".lp")
    
    bestTree = [];
#     print "n =",g.n
    gHeads = g.optHeads
    if useGoldHeads:
        gHeads = g.goldHeads
#     if g.n > 15:
#         return
    for i in range(0,g.n):
        v = i + 1
        u = int(gHeads[i])
        bestTree.append((u,v))
        if verbose: 
            print "best tree added (" + str(u) + "," + str(v) + ")"
    lpm.createLP(proj, bestTree,applyPositiveSlacks,alpha)
    lpm.lpFile = None
    lpm.solve(verbose)
    if lpm.model.status != gp.GRB.status.OPTIMAL:
        print "\n\nINFEASIBLE: file ID =", fileIndex
        lpm.model.computeIIS()
        for c in lpm.model.getConstrs():
            if c.getAttr(gp.GRB.Attr.IISConstr) > 0:
                print c.getAttr(gp.GRB.Attr.ConstrName),c.getAttr(gp.GRB.Attr.Sense),c.getAttr(gp.GRB.Attr.RHS)
        return 

    w = {}
    for (u,v) in lpm.newWeights.keys():
        w[u,v] = lpm.newWeights[u,v]
    
    inf = inference(w,g.n);
    optGProj            = inf.eisnerProjective()
    optGNonProj         = inf.chuLiuEdmondsWrapper()
    optGgreedyMinLoss   = inf.greedyMinLoss() 
    
    goldHeads = map(lambda u: int(u), g.goldHeads)
    origOptHeads = map(lambda u: int(u), g.optHeads)
    outHeads = {'projInference':[],'nonProj': [], 'minLoss': []}
    if writeCsvFile:
        csvfile = open(outputFileName, 'wb')
        fieldnames = ["childIndex","goldHead","highOrderOptHead","optHead","LP Head"] 
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
    out = {}
    for (optG, keyName) in [(optGProj,'projInference'), (optGNonProj,'nonProj'), (optGgreedyMinLoss,'minLoss')]: 
        optEdges = optG.edges()
        edgesFromLp = lpm.optEdges
        nOptGoldCorrect = 0
        nOptOrigOptCorrect = 0
        nLPGoldCorrect = 0
        nLP_OPTCorrect = 0
        nLP_OrigOptCorrect = 0
        nOrigOptGoldCorrect = 0
        for i in range(0,g.n):
            v = i + 1
            goldu = goldHeads[i]
            origOptu = origOptHeads[i]
            optEdge = filter(lambda (u_j,v_j): v_j == v, optEdges)
            optU = optEdge[0][0]
            outHeads[keyName].append(optU)
            edgeFromLp = filter(lambda (u_j,v_j): v_j == v, edgesFromLp)
            LPU = edgeFromLp[0][0]
            line = {"childIndex": v,"goldHead": goldu,"highOrderOptHead": origOptu, "optHead": optU,"LP Head": LPU}
    #         print line
            if writeCsvFile:
                writer.writerow(line)
            if optU == goldu:
                nOptGoldCorrect += 1 
            if optU == origOptu:
                nOptOrigOptCorrect += 1
            if LPU == goldu:
                nLPGoldCorrect += 1
            if LPU == optU:
                nLP_OPTCorrect += 1
            if LPU == origOptu:
                nLP_OrigOptCorrect += 1
            if origOptu == goldu:
                nOrigOptGoldCorrect += 1
            
        if writeCsvFile:
            csvfile.close()
    #     print nOptCorrect,nLPCorrect,nLP_OPTCorrect
#         normalizationFactor = float(g.n)
        out[keyName] = {'noptGold': nOptGoldCorrect, 'nLPGold': nLPGoldCorrect, 'n_LP_OPT': nLP_OPTCorrect, \
               'norigOptGold': nOrigOptGoldCorrect, 'nOptOrigOpt': nOptOrigOptCorrect, 'nLpOrigOpt': nLP_OrigOptCorrect, 'n': g.n}
        if getTrees:
            out['goldHeads'] = goldHeads
            out['highOrderOptHeads'] = origOptHeads
            out['projOptHeads'] = outHeads['projInference']
            out['nonProjOptHeads'] = outHeads['nonProj']
    
    return out