Code Example #1
 def __init__(self):
     self.base_config = BaseConfig()
     self.bert_config = BertConfig()
     self.textcnn_config = TextCNNConfig()
     self.batch_size = 128
     self.epochs = 10
     self.lr = 1e-3
     self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
     self.T = 10  # distillation temperature
     self.alpha = 0.9  # weight between the soft-target loss and the hard-target loss
     self.model_path = "./model/textcnn_kd.pth"
     self.log_path = "./log/train_kd.log"
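
The T and alpha fields above are the usual knowledge-distillation knobs: T softens the teacher and student distributions, and alpha balances the soft-target term against the ordinary cross-entropy on the true labels. A minimal sketch of how such a config is typically consumed (hypothetical function, assuming PyTorch; not the project's actual training code):

import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, labels, T=10, alpha=0.9):
    # Soft-target term: KL divergence between temperature-scaled distributions.
    # The T*T factor keeps gradient magnitudes comparable across temperatures.
    soft = F.kl_div(F.log_softmax(student_logits / T, dim=-1),
                    F.softmax(teacher_logits / T, dim=-1),
                    reduction="batchmean") * (T * T)
    # Hard-target term: standard cross-entropy against the ground-truth labels.
    hard = F.cross_entropy(student_logits, labels)
    return alpha * soft + (1 - alpha) * hard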
Code Example #2
 def __init__(self):
     self.base_config = BaseConfig()
     self.device = torch.device(
         "cuda:0" if torch.cuda.is_available() else "cpu")
     self.label2idx = load_json(self.base_config.label2idx_path)
     self.class_num = len(self.label2idx)
     self.epochs = 4
     self.lr = 2e-5
     self.hidden_size = 768
     self.dropout = 0.1
     self.batch_size = 32
     self.model_path = "./model/bert.pth"
     self.log_path = "./log/train_bert.log"
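
For context, hidden_size, dropout and class_num in a config like this usually feed a small classification head on top of the encoder. A hedged sketch (uses the Hugging Face transformers API and an illustrative checkpoint name; the project's own BertConfig and model wiring may differ):

import torch.nn as nn
from transformers import BertModel

class BertClassifierSketch(nn.Module):
    def __init__(self, cfg, pretrained_name="bert-base-chinese"):  # checkpoint name is an assumption
        super().__init__()
        self.bert = BertModel.from_pretrained(pretrained_name)
        self.dropout = nn.Dropout(cfg.dropout)
        self.fc = nn.Linear(cfg.hidden_size, cfg.class_num)        # 768 -> number of labels

    def forward(self, input_ids, attention_mask):
        out = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        return self.fc(self.dropout(out.pooler_output))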
Code Example #3
File: fs_consumer2.py Project: jezaustin/fs-python
    def __init__(self,
                 consumer,
                 consumer_id,
                 topic_list=None,  # None instead of [] to avoid a shared mutable default
                 config=BaseConfig.config(),
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)

        self.config = config
        self.consumer = consumer
        self.consumer_id = consumer_id
        self.topic_list = topic_list if topic_list is not None else []
        self.peak_memory_mb = self.get_peak_memory()
        print(
            "[FSConsumer2] - consumer_id={}, topic_list={}, config={}".format(
                self.consumer_id, self.topic_list, self.config))
Code Example #4
 def __init__(self):
     self.base_config = BaseConfig()
     self.label2idx = load_json(self.base_config.label2idx_path)
     self.word2idx = load_json(self.base_config.word2idx_path)
     self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')   # device
     self.dropout = 0.5                                              # dropout rate
     self.num_classes = len(self.label2idx)                          # number of classes
     self.n_vocab = self.base_config.vocab_size                      # vocabulary size
     self.num_epochs = 10                                            # number of epochs
     self.batch_size = 128                                           # mini-batch size
     self.learning_rate = 1e-3                                       # learning rate
     self.embed_size = 300                                           # embedding size
     self.filter_sizes = (2, 3, 4)                                   # convolution kernel sizes
     self.num_filters = 128                                          # number of kernels (output channels)
     self.embedding_pretrained = None                                # pretrained word embeddings
     self.model_path = "./model/textcnn.pth"                         # model save path
     self.log_path = "./log/train_textcnn.log"                       # training log path
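
The embed_size, filter_sizes and num_filters fields map onto a TextCNN in the standard way: one convolution per kernel size over the embedded sequence, max-pooling over time, then a linear classifier. A minimal PyTorch sketch under that assumption (illustrative class, not the project's actual model):

import torch
import torch.nn as nn
import torch.nn.functional as F

class TextCNNSketch(nn.Module):
    def __init__(self, cfg):
        super().__init__()
        self.embedding = nn.Embedding(cfg.n_vocab, cfg.embed_size, padding_idx=0)
        # One Conv2d per kernel size; each filter spans the full embedding width.
        self.convs = nn.ModuleList(
            [nn.Conv2d(1, cfg.num_filters, (k, cfg.embed_size)) for k in cfg.filter_sizes])
        self.dropout = nn.Dropout(cfg.dropout)
        self.fc = nn.Linear(cfg.num_filters * len(cfg.filter_sizes), cfg.num_classes)

    def forward(self, x):                        # x: (batch, seq_len) token ids
        emb = self.embedding(x).unsqueeze(1)     # (batch, 1, seq_len, embed_size)
        pooled = []
        for conv in self.convs:
            c = F.relu(conv(emb)).squeeze(3)                       # (batch, num_filters, L_out)
            pooled.append(F.max_pool1d(c, c.size(2)).squeeze(2))   # (batch, num_filters)
        out = torch.cat(pooled, dim=1)
        return self.fc(self.dropout(out))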
Code Example #5
if __name__ == '__main__':
    arg_db_name = 'imagenet'
    arg_net = 'densenet161'  #[densenet161,inceptionv1,resnet50]  # for now, only DenseNet is supported
    args = [
        '--gpu',
        '0',
        '--output_dir',
        './output_heatmaps/',
        '--db_name',
        arg_db_name,
        '--img_name',
        'ILSVRC2012_val_00000021.JPEG',  #[ILSVRC2012_val_00000021.JPEG,cute_dog.jpg]
        # '--print_filter_name',
        '--net',
        arg_net,
        '--caf_variant',
        'cls_specific',  #[cls_oblivious,cls_specific]
        '--learning_rate',
        '0.5',
        '--max_iters',
        '1000',
        '--filter_type',
        'l2norm',  #[l2norm,softmax,gauss]
        '--replicate_net_at',
        'densenet161/dense_block4/conv_block24/concat:0',
        '--atten_filter_position',
        'dense_block4/{}_conv_block24:0'  # last conv DenseNet
    ]
    cfg = BaseConfig().parse(args)
    assert cfg.net == 'densenet161'
    main(cfg)
Code Example #6
File: test.py Project: piperod/softmax_triplet_loss
def main(argv):

    cfg = BaseConfig().parse(argv)
    os.environ["CUDA_VISIBLE_DEVICES"] = cfg.gpu
    img_generator_class = locate(cfg.db_tuple_loader)
    args = dict()
    args['db_path'] = cfg.db_path
    args['tuple_loader_queue_size'] = cfg.tuple_loader_queue_size
    args['preprocess_func'] = cfg.preprocess_func
    args['batch_size'] = cfg.batch_size
    args['shuffle'] = False
    args['img_size'] = const.max_frame_size
    args['gen_hot_vector'] = True

    args['batch_size'] = cfg.batch_size
    args['csv_file'] = cfg.test_csv_file
    test_iter = img_generator_class(args)

    test_imgs, test_lbls = test_iter.imgs_and_lbls()
    cfg = BaseConfig().parse(argv)
    os.environ["CUDA_VISIBLE_DEVICES"] = cfg.gpu
    model_dir = cfg.checkpoint_dir
    print(model_dir)
    log_file = os.path.join(cfg.checkpoint_dir, cfg.log_filename + '_test.txt')
    logger = log_utils.create_logger(log_file)
    with tf.Graph().as_default():
        #meta_file = os.path.join(model_dir,'model.ckptbest.meta')
        #saver = tf.train.import_meta_graph(meta_file)
        #ckpt_file = os.path.join(model_dir,'model.ckptbest')
        #saver.restore(sess,ckpt_file)

        #print('Model Path {}'.format(ckpt_file))
        #load_model_msg = model.load_model(model_dir, ckpt_file, sess, saver, load_logits=True)
        #logger.info(load_model_msg)
        #graph = tf.get_default_graph()
        #print(graph.get_operations())

        test_dataset = QuickTupleLoader(test_imgs,
                                        test_lbls,
                                        cfg,
                                        is_training=False,
                                        repeat=False).dataset
        handle = tf.placeholder(tf.string, shape=[])
        iterator = tf.data.Iterator.from_string_handle(
            handle, test_dataset.output_types, test_dataset.output_shapes)
        images_ph, lbls_ph = iterator.get_next()

        network_class = locate(cfg.network_name)
        model = network_class(cfg, images_ph=images_ph, lbls_ph=lbls_ph)
        validation_iterator = test_dataset.make_initializable_iterator()

        sess = tf.InteractiveSession()
        validation_handle = sess.run(validation_iterator.string_handle())
        ckpt_file = tf.train.latest_checkpoint(model_dir)
        print(ckpt_file)
        tf.global_variables_initializer().run()
        saver = tf.train.Saver()
        load_model_msg = model.load_model(model_dir,
                                          ckpt_file,
                                          sess,
                                          saver,
                                          load_logits=True)
        print(load_model_msg)

        ckpt_file = os.path.join(model_dir, cfg.checkpoint_filename)

        val_loss = tf.summary.scalar('Val_Loss', model.val_loss)
        val_acc_op = tf.summary.scalar('Batch_Val_Acc', model.val_accuracy)
        model_acc_op = tf.summary.scalar('Split_Val_Accuracy',
                                         model.val_accumulated_accuracy)

        run_metadata = tf.RunMetadata()
        tf.local_variables_initializer().run()
        sess.run(validation_iterator.initializer)

        _val_acc_op = 0
        gts = []
        preds = []
        pred_3 = []
        pred_5 = []
        while True:
            try:
                # Eval network on validation/testing split
                feed_dict = {handle: validation_handle}
                gt, preds_raw, predictions, acc_per_class, val_loss_op, batch_accuracy, accuracy_op, _val_acc_op, _val_acc, c_cnf_mat, macro_acc = sess.run(
                    [
                        model.val_gt, model.val_preds,
                        model.val_class_prediction,
                        model.val_per_class_acc_acc, val_loss,
                        model.val_accuracy, model_acc_op, val_acc_op,
                        model.val_accumulated_accuracy,
                        model.val_confusion_mat, model.val_per_class_acc_acc
                    ], feed_dict)
                gts += list(gt)
                preds += list(predictions)

                for g, p in zip(gt, preds_raw):
                    preds_sort_3 = np.argsort(p)[-3:]
                    preds_sort_5 = np.argsort(p)[-5:]
                    if g in preds_sort_3:
                        pred_3 += [g]
                    else:
                        pred_3 += [preds_sort_3[-1]]

                    if g in preds_sort_5:
                        pred_5 += [g]
                    else:
                        pred_5 += [preds_sort_5[-1]]

                #print('Acc per class:',acc_per_class)
                #print('batch:',batch_accuracy)
                #print('Confusion Matrix:',c_cnf_mat)
                #print('gt:',gt)
                #print('preds:',preds_raw)
                #print('predictions:',predictions)
            #logger.info('Val Acc {0}, Macro Acc: {1}'.format(_val_acc,macro_acc))
            except tf.errors.OutOfRangeError:
                #    logger.info('problem:')
                #    logger.info('Val Acc {0}, Macro Acc: {1}'.format(_val_acc,macro_acc))
                logger.info('____ Classification Report Top 1 ____')
                report = classification_report(gts, preds, output_dict=True)
                csv_pd = classification_report_csv(report)
                csv_pd.to_csv(
                    os.path.join(model_dir, 'Classification_Report_top1.csv'))
                logger.info(report)
                logger.info('____ Classification Report Top 3 ____')
                report = classification_report(gts, pred_3, output_dict=True)
                csv_pd = classification_report_csv(report)
                csv_pd.to_csv(
                    os.path.join(model_dir, 'Classification_Report_top3.csv'))
                logger.info(report)
                logger.info('____ Classification Report Top 5 ____')
                report = classification_report(gts, pred_5, output_dict=True)
                csv_pd = classification_report_csv(report)
                csv_pd.to_csv(
                    os.path.join(model_dir, 'Classification_Report_top5.csv'))
                logger.info(report)

                break
Code Example #7
def main(argv):
    cfg = BaseConfig().parse(argv)
    os.environ["CUDA_VISIBLE_DEVICES"] = cfg.gpu
    save_model_dir = cfg.checkpoint_dir
    model_basename = os.path.basename(save_model_dir)
    touch_dir(save_model_dir)

    args_file = os.path.join(cfg.checkpoint_dir, 'args.json')
    with open(args_file, 'w') as f:
        json.dump(vars(cfg), f, ensure_ascii=False, indent=2, sort_keys=True)
    # os_utils.touch_dir(save_model_dir)

    log_file = os.path.join(cfg.checkpoint_dir, cfg.log_filename + '.txt')
    os_utils.touch_dir(cfg.checkpoint_dir)
    logger = log_utils.create_logger(log_file)

    img_generator_class = locate(cfg.db_tuple_loader)
    args = dict()
    args['db_path'] = cfg.db_path
    args['tuple_loader_queue_size'] = cfg.tuple_loader_queue_size
    args['preprocess_func'] = cfg.preprocess_func
    args['batch_size'] = cfg.batch_size
    args['shuffle'] = False
    args['csv_file'] = cfg.train_csv_file
    args['img_size'] = const.max_frame_size
    args['gen_hot_vector'] = True
    train_iter = img_generator_class(args)
    args['batch_size'] = cfg.batch_size
    args['csv_file'] = cfg.test_csv_file
    val_iter = img_generator_class(args)

    trn_images, trn_lbls = train_iter.imgs_and_lbls()
    val_imgs, val_lbls = val_iter.imgs_and_lbls()

    with tf.Graph().as_default():
        if cfg.train_mode == 'semi_hard' or cfg.train_mode == 'hard' or cfg.train_mode == 'cntr':
            train_dataset = TripletTupleLoader(trn_images, trn_lbls,
                                               cfg).dataset
        elif cfg.train_mode == 'vanilla':
            train_dataset = QuickTupleLoader(trn_images,
                                             trn_lbls,
                                             cfg,
                                             is_training=True,
                                             shuffle=True,
                                             repeat=True).dataset
        else:
            raise NotImplementedError('{} is not a valid train mode'.format(
                cfg.train_mode))

        val_dataset = QuickTupleLoader(val_imgs,
                                       val_lbls,
                                       cfg,
                                       is_training=False,
                                       repeat=False).dataset
        handle = tf.placeholder(tf.string, shape=[])
        iterator = tf.data.Iterator.from_string_handle(
            handle, train_dataset.output_types, train_dataset.output_shapes)
        images_ph, lbls_ph = iterator.get_next()

        network_class = locate(cfg.network_name)
        model = network_class(cfg, images_ph=images_ph, lbls_ph=lbls_ph)

        # Which loss fn to impose. For example, softmax only is applied in vanilla mode,
        # while softmax + semi-hard triplet is applied in semi_hard mode.
        if cfg.train_mode == 'semi_hard':
            pre_logits = model.train_pre_logits
            _, w, h, channels = pre_logits.shape
            embed_dim = cfg.emb_dim
            embedding_net = ConvEmbed(emb_dim=embed_dim,
                                      n_input=channels,
                                      n_h=h,
                                      n_w=w)
            embedding = embedding_net.forward(pre_logits)
            embedding = tf.nn.l2_normalize(embedding, axis=-1, epsilon=1e-10)
            margin = cfg.margin
            gt_lbls = tf.argmax(model.gt_lbls, 1)
            metric_loss = triplet_semi.triplet_semihard_loss(
                gt_lbls, embedding, margin)
            logger.info('Triplet loss lambda {}, with margin {}'.format(
                cfg.triplet_loss_lambda, margin))
            total_loss = model.train_loss + cfg.triplet_loss_lambda * tf.reduce_mean(
                metric_loss)
        elif cfg.train_mode == 'hard':
            pre_logits = model.train_pre_logits
            _, w, h, channels = pre_logits.shape
            embed_dim = cfg.emb_dim
            embedding_net = ConvEmbed(emb_dim=embed_dim,
                                      n_input=channels,
                                      n_h=h,
                                      n_w=w)
            embedding = embedding_net.forward(pre_logits)
            embedding = tf.nn.l2_normalize(embedding, axis=-1, epsilon=1e-10)
            margin = cfg.margin

            logger.info('Triplet loss lambda {}, with margin {}'.format(
                cfg.triplet_loss_lambda, margin))
            gt_lbls = tf.argmax(model.gt_lbls, 1)
            metric_loss = triplet_hard.batch_hard(gt_lbls, embedding, margin)
            total_loss = model.train_loss + cfg.triplet_loss_lambda * tf.reduce_mean(
                metric_loss)
        elif cfg.train_mode == 'cntr':

            pre_logits = model.train_pre_logits
            _, w, h, channels = pre_logits.shape
            embed_dim = cfg.emb_dim
            embedding_net = ConvEmbed(emb_dim=embed_dim,
                                      n_input=channels,
                                      n_h=h,
                                      n_w=w)
            embedding = embedding_net.forward(pre_logits)
            embedding = tf.nn.l2_normalize(embedding, axis=-1, epsilon=1e-10)
            CENTER_LOSS_LAMBDA = 0.003
            CENTER_LOSS_ALPHA = 0.5
            num_fg_classes = cfg.num_classes
            gt_lbls = tf.argmax(model.gt_lbls, 1)
            center_loss_order, centroids, centers_update_op, appear_times, diff = center_loss.get_center_loss(
                embedding, gt_lbls, CENTER_LOSS_ALPHA, num_fg_classes)
            # sample_centroid = tf.reshape(tf.gather(centroids, gt_lbls), [-1, config.emb_dim])
            # center_loss_order = center_loss.center_loss(sample_centroid , embedding)
            logger.info('Center loss lambda {}'.format(CENTER_LOSS_LAMBDA))
            total_loss = model.train_loss + CENTER_LOSS_LAMBDA * tf.reduce_mean(
                center_loss_order)

        elif cfg.train_mode == 'vanilla':
            total_loss = model.train_loss

        logger.info('Train Mode {}'.format(cfg.train_mode))
        # variables_to_train = model.var_2_train();
        # logger.info('variables_to_train  ' + str(variables_to_train))

        trainable_vars = tf.trainable_variables()
        if cfg.caffe_iter_size > 1:  ## Accumulated Gradient
            ## Create a list of variables with the same shape as the trainable ones,
            ## initialized with zeros, to accumulate gradients across mini-batches
            accum_vars = [
                tf.Variable(tf.zeros_like(tv.initialized_value()),
                            trainable=False) for tv in trainable_vars
            ]
            zero_ops = [tv.assign(tf.zeros_like(tv)) for tv in accum_vars]

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        if cfg.train_mode == const.Train_Mode.CNTR:
            update_ops.append(centers_update_op)

        # print(update_ops)

        with tf.control_dependencies(update_ops):

            global_step = tf.Variable(0, name='global_step', trainable=False)
            learning_rate = tf_utils.poly_lr(global_step, cfg)
            optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)

            if cfg.caffe_iter_size > 1:  ## Accumulated Gradient
                # grads = tf.Print(grads,[grads],'Grad Print');
                grads = optimizer.compute_gradients(total_loss, trainable_vars)
                # Add each gradient to its matching accumulator (works because accum_vars and grads are in the same order)
                accum_ops = [
                    accum_vars[i].assign_add(gv[0])
                    for i, gv in enumerate(grads)
                ]
                iter_size = cfg.caffe_iter_size
                # Define the training step (part with variable value update)
                train_op = optimizer.apply_gradients(
                    [(accum_vars[i] / iter_size, gv[1])
                     for i, gv in enumerate(grads)],
                    global_step=global_step)

            else:
                grads = optimizer.compute_gradients(total_loss)
                train_op = optimizer.apply_gradients(grads,
                                                     global_step=global_step)

        sess = tf.InteractiveSession()
        training_iterator = train_dataset.make_one_shot_iterator()
        validation_iterator = val_dataset.make_initializable_iterator()
        training_handle = sess.run(training_iterator.string_handle())
        validation_handle = sess.run(validation_iterator.string_handle())

        tb_path = save_model_dir
        logger.info(tb_path)
        start_iter = tb_utils.get_latest_iteration(tb_path)

        train_writer = tf.summary.FileWriter(tb_path, sess.graph)
        tf.global_variables_initializer().run()
        saver = tf.train.Saver()  # saves variables learned during training

        ckpt_file = tf.train.latest_checkpoint(save_model_dir)
        logger.info('Model Path {}'.format(ckpt_file))
        load_model_msg = model.load_model(save_model_dir,
                                          ckpt_file,
                                          sess,
                                          saver,
                                          load_logits=False)
        logger.info(load_model_msg)

        ckpt_file = os.path.join(save_model_dir, cfg.checkpoint_filename)

        val_loss = tf.summary.scalar('Val_Loss', model.val_loss)
        val_acc_op = tf.summary.scalar('Batch_Val_Acc', model.val_accuracy)
        model_acc_op = tf.summary.scalar('Split_Val_Accuracy',
                                         model.val_accumulated_accuracy)

        best_model_step = 0
        best_acc = 0
        logger.info('Start Training from {}, till {}'.format(
            start_iter, cfg.train_iters))
        # Start Training
        for step in range(start_iter + 1, cfg.train_iters + 1):

            start_time_train = time.time()

            # Update network weights while supporting caffe_iter_size
            for mini_batch in range(cfg.caffe_iter_size - 1):
                feed_dict = {handle: training_handle}
                model_loss_value, accuracy_value, _ = sess.run(
                    [model.train_loss, model.train_accuracy, accum_ops],
                    feed_dict)

            feed_dict = {handle: training_handle}
            model_loss_value, accuracy_value, _ = sess.run(
                [model.train_loss, model.train_accuracy, train_op], feed_dict)
            if cfg.caffe_iter_size > 1:  ## Accumulated Gradient
                sess.run(zero_ops)

            train_time = time.time() - start_time_train

            if (step == 1 or step % cfg.logging_threshold == 0):
                logger.info(
                    'i {0:04d} loss {1:4f} Acc {2:2f} Batch Time {3:3f}'.
                    format(step, model_loss_value, accuracy_value, train_time))

                if (step % cfg.test_interval == 0):
                    run_metadata = tf.RunMetadata()
                    tf.local_variables_initializer().run()
                    sess.run(validation_iterator.initializer)

                    _val_acc_op = 0
                    while True:
                        try:

                            # Eval network on validation/testing split
                            feed_dict = {handle: validation_handle}
                            val_loss_op, batch_accuracy, accuracy_op, _val_acc_op, _val_acc, c_cnf_mat, macro_acc = sess.run(
                                [
                                    val_loss, model.val_accuracy, model_acc_op,
                                    val_acc_op, model.val_accumulated_accuracy,
                                    model.val_confusion_mat,
                                    model.val_per_class_acc_acc
                                ], feed_dict)
                        except tf.errors.OutOfRangeError:
                            logger.info('Val Acc {0}, Macro Acc: {1}'.format(
                                _val_acc, macro_acc))
                            break

                    train_writer.add_run_metadata(run_metadata,
                                                  'step%03d' % step)
                    train_writer.add_summary(val_loss_op, step)
                    train_writer.add_summary(_val_acc_op, step)
                    train_writer.add_summary(accuracy_op, step)
                    train_writer.flush()

                    if (step % 100 == 0):
                        saver.save(sess, ckpt_file)
                        if best_acc < _val_acc:
                            saver.save(sess, ckpt_file + 'best')
                            best_acc = _val_acc
                            best_model_step = step

                        logger.info('Best Acc {0} at {1} == {2}'.format(
                            best_acc, best_model_step, model_basename))

        logger.info('Triplet loss lambda {}'.format(cfg.triplet_loss_lambda))
        logger.info('Mode {}'.format(cfg.train_mode))
        logger.info('Loop complete')
        sess.close()
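
The caffe_iter_size branches above implement gradient accumulation: gradients from several mini-batches are summed into accum_vars and a single averaged update is applied, giving a larger effective batch size at the memory cost of one mini-batch. The same idea as a minimal PyTorch sketch (illustrative only; the project itself does this with the TF1 accum_ops/zero_ops shown above):

def train_with_accumulation(model, loss_fn, optimizer, data_iter, iter_size=4):
    # Effective batch size = loader batch size * iter_size.
    optimizer.zero_grad()
    for step, (x, y) in enumerate(data_iter):
        loss = loss_fn(model(x), y) / iter_size   # scale so the update matches one big batch
        loss.backward()                           # gradients accumulate in .grad
        if (step + 1) % iter_size == 0:
            optimizer.step()
            optimizer.zero_grad()                 # analogous to running zero_ops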
Code Example #8
# -*- coding: utf-8 -*-

import os
import json
from collections import Counter
from utils import *
from config.base_config import BaseConfig

base_config = BaseConfig()
train_data_path = base_config.train_data_path


def preprocess_data():
    if not os.path.exists("./model"):
        os.mkdir("./model")
    if not os.path.exists("./log"):
        os.mkdir("./log")

    label2idx = {}
    words = []
    with open(train_data_path) as f:
        for line in f:
            line = json.loads(line)
            label = line["label"]
            text = line["sentence"]
            if label not in label2idx:
                label2idx[label] = len(label2idx)
            words.extend(list(text))

    words_counter = Counter(words).most_common(base_config.vocab_size - 2)
    words = ["<PAD>", "<UNK>"] + [w for w, c in words_counter]
    word_map = {word: idx for idx, word in enumerate(words)}
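
With word_map built this way (index 0 reserved for "<PAD>", index 1 for "<UNK>", and the text split into characters), encoding a sentence is a plain dictionary lookup. A small illustrative helper, not part of the original script:

def encode(text, word_map, max_len=32):
    # Map each character to its index, falling back to <UNK> for out-of-vocabulary
    # characters, then pad or truncate to a fixed length with <PAD>.
    ids = [word_map.get(ch, word_map["<UNK>"]) for ch in text][:max_len]
    ids += [word_map["<PAD>"]] * (max_len - len(ids))
    return ids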
Code Example #9
def main(argv):

    cfg = BaseConfig().parse(argv)
    os.environ["CUDA_VISIBLE_DEVICES"] = cfg.gpu
    img_generator_class = locate(cfg.db_tuple_loader)
    args = dict()
    args['db_path'] = cfg.db_path
    args['tuple_loader_queue_size'] = cfg.tuple_loader_queue_size
    args['preprocess_func'] = cfg.preprocess_func
    args['batch_size'] = cfg.batch_size
    args['shuffle'] = False
    args['img_size'] = const.max_frame_size
    args['gen_hot_vector'] = True

    args['batch_size'] = cfg.batch_size
    args['csv_file'] = cfg.test_csv_file
    test_iter = img_generator_class(args)

    test_imgs, test_lbls = test_iter.imgs_and_lbls()
    cfg = BaseConfig().parse(argv)
    os.environ["CUDA_VISIBLE_DEVICES"] = cfg.gpu
    model_dir = cfg.checkpoint_dir
    print(model_dir)
    log_file = os.path.join(cfg.checkpoint_dir, cfg.log_filename + '_test.txt')
    logger = log_utils.create_logger(log_file)

    with tf.Graph().as_default():
        #meta_file = os.path.join(model_dir,'model.ckptbest.meta')
        #saver = tf.train.import_meta_graph(meta_file)
        #ckpt_file = os.path.join(model_dir,'model.ckptbest')
        #saver.restore(sess,ckpt_file)

        #print('Model Path {}'.format(ckpt_file))
        #load_model_msg = model.load_model(model_dir, ckpt_file, sess, saver, load_logits=True)
        #logger.info(load_model_msg)
        #graph = tf.get_default_graph()
        #print(graph.get_operations())

        test_dataset = QuickTupleLoader(test_imgs,
                                        test_lbls,
                                        cfg,
                                        is_training=False,
                                        repeat=False).dataset
        handle = tf.placeholder(tf.string, shape=[])
        iterator = tf.data.Iterator.from_string_handle(
            handle, test_dataset.output_types, test_dataset.output_shapes)
        images_ph, lbls_ph = iterator.get_next()

        network_class = locate(cfg.network_name)
        model = network_class(cfg, images_ph=images_ph, lbls_ph=lbls_ph)
        validation_iterator = test_dataset.make_initializable_iterator()

        sess = tf.InteractiveSession()
        validation_handle = sess.run(validation_iterator.string_handle())
        ckpt_file = tf.train.latest_checkpoint(model_dir)
        print(ckpt_file)
        tf.global_variables_initializer().run()
        saver = tf.train.Saver()
        load_model_msg = model.load_model(model_dir,
                                          ckpt_file,
                                          sess,
                                          saver,
                                          load_logits=True)
        print(load_model_msg)

        ckpt_file = os.path.join(model_dir, cfg.checkpoint_filename)

        val_loss = tf.summary.scalar('Val_Loss', model.val_loss)
        val_acc_op = tf.summary.scalar('Batch_Val_Acc', model.val_accuracy)
        model_acc_op = tf.summary.scalar('Split_Val_Accuracy',
                                         model.val_accumulated_accuracy)

        run_metadata = tf.RunMetadata()
        tf.local_variables_initializer().run()
        sess.run(validation_iterator.initializer)

        _val_acc_op = 0
        feat = []
        label = []
        pooling = []
        while True:
            try:
                # Eval network on validation/testing split
                feed_dict = {handle: validation_handle}
                features, labels = sess.run(
                    [model.val_end_features, model.val_features_labels],
                    feed_dict)

                print(labels.shape)
                feat.append(features['resnet_v2_50/block4'])
                pooling.append(features['global_pool'])
                label.append(labels)
                print('___________________')
            except tf.errors.OutOfRangeError:

                path = model_dir
                f_folder = os.path.join(model_dir, 'features')
                os.makedirs(f_folder, exist_ok=True)
                p_file = os.path.join(f_folder, 'pooling.npy')
                f_file = os.path.join(f_folder, 'features.npy')
                l_file = os.path.join(f_folder, 'labels.npy')
                print('pooling')
                pooling = np.concatenate(pooling)
                pooling = pooling.reshape(pooling.shape[0], -1)
                np.save(p_file, pooling)
                print('7x7')
                np.save(f_file, feat)
                print('labels')
                np.save(l_file, np.array(label))
                break
Code Example #10
def set_browser_type(browser):
    config = BaseConfig().read_config(config_path)
    config.set("WEBDRIVER", "browser", browser)
    with open(config_path, 'w') as configfile:
        config.write(configfile)
Code Example #11
from typing import Dict
from pydantic import Field, validator

from config.base_config import BaseConfig

bc = BaseConfig()


class DBConfig(BaseConfig):
    DB_DRIVER: str = Field(..., env=f"{bc.DIALECT}_DRIVER")
    DB_USER: str = Field(..., env=f"{bc.DIALECT}_USER")
    DB_PASS: str = Field(..., env=f"{bc.DIALECT}_PASS")
    DB_HOST: str = Field(..., env=f"{bc.DIALECT}_HOST")
    DB_PORT: int = Field(..., env=f"{bc.DIALECT}_PORT")
    DB_NAME: str = Field(..., env=f"{bc.DIALECT}_NAME")
    DB_URL: str = ""

    @validator("DB_URL")
    def _db_url_value(cls, db_url, values) -> str:
        return (f"{values['DIALECT']}+{values['DB_DRIVER']}://" +
                f"{values['DB_USER']}:{values['DB_PASS']}@" +
                f"{values['DB_HOST']}:{values['DB_PORT']}/{values['DB_NAME']}")

    RANKINGS: dict = {}

    @validator("RANKINGS")
    def _load_rankings(cls, rankings, values) -> dict:
        return cls.read_json(values["RANKINGS_FILE"])

    MATCHES: Dict[str, Dict[str, str]] = {}
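
One caveat for the snippet above: under pydantic v1, a validator only runs on a field's default value when it is declared with always=True, so DB_URL and RANKINGS as written keep their defaults ("" and {}) unless a value is passed in explicitly. A hedged usage sketch under that assumption (RANKINGS_FILE, DIALECT and the driver/user/password environment variables are presumed to be supplied by BaseConfig and the environment):

# Passing empty placeholders makes the validators fire and fill in the derived values.
cfg = DBConfig(DB_URL="", RANKINGS={})
print(cfg.DB_URL)  # e.g. "<DIALECT>+<DB_DRIVER>://<DB_USER>:<DB_PASS>@<DB_HOST>:<DB_PORT>/<DB_NAME>"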