Code Example #1
    def __init__(self):
        # Attributes
        # General
        self.data = Dataset()
        self.activity_label = [
            'A',
            'B',
            'C',
            'D',
            'E',
            'F',
            'G',
            'H',
            'I',
            'J',
            'K',
            'L',
            'M',
            'O',
            'P',
            'Q',
            'R',
            'S',
            'T',
            'U',
        ]

        # Views
        self.results_view = None
        self.data_import_view = None
        self.pca_graphics_view = None
        self.pca_utilization_view = None
        self.choose_classifier_view = None
        self.feature_selection_view = None
Code Example #2
File: test_model.py Project: sbrandtb/artshow-keeper
    def test_getAllItemsInAuction_Ordering(self):
        datasetAuction = Dataset(
                self.logger, './',
                self.sessionFile.getFilename(),
                self.itemFileAuctionOnly.getFilename(),
                self.currencyFile.getFilename())
        datasetAuction.restore()
        modelAuction = Model(
                self.logger,
                datasetAuction,
                self.currency)

        auctionItems = modelAuction.getAllItemsInAuction()
        auctionItems.sort(key=lambda item: item[ItemField.AUCTION_SORT_CODE])

        for item in auctionItems:
            print('{0} - {1}'.format(item[ItemField.AUTHOR], item[ItemField.AMOUNT]))

        # Check that there is no block of items by the same author larger than two
        largestBlockSize = 0
        largestBlockAuthor = None
        blockAuthor = None
        blockSize = 0
        for item in auctionItems:
            if blockAuthor is not None and item[ItemField.AUTHOR] == blockAuthor:
                blockSize = blockSize + 1
            else:
                if blockSize > largestBlockSize:
                    largestBlockSize = blockSize
                    largestBlockAuthor = blockAuthor
                blockAuthor = item[ItemField.AUTHOR]
                blockSize = 1
        self.assertGreaterEqual(2, largestBlockSize, 'Author: ' + str(largestBlockAuthor))
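
As a side note, the block-size scan in the test above can also be written with itertools.groupby, which additionally accounts for the final run of items. The helper below is only an illustrative sketch: largest_author_block is a hypothetical name, and only ItemField.AUTHOR and the two-item limit are taken from the test itself.

import itertools

def largest_author_block(items, author_key):
    # Return (size, author) for the longest run of consecutive items by the same author.
    best_size, best_author = 0, None
    for author, group in itertools.groupby(items, key=lambda item: item[author_key]):
        size = sum(1 for _ in group)
        if size > best_size:
            best_size, best_author = size, author
    return best_size, best_author

# Possible use against the sorted auction items:
#   size, author = largest_author_block(auctionItems, ItemField.AUTHOR)
#   self.assertGreaterEqual(2, size, 'Author: ' + str(author))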
Code Example #3
File: test_model.py Project: sbrandtb/artshow-keeper
    def setUp(self):
        self.logger = logging.getLogger()

        self.testFiles = []

        self.itemFile = Datafile('test.model.items.xml', self.id())
        self.itemFileAuctionOnly = Datafile('test.model.items.auction_only.xml', self.id())
        self.sessionFile = Datafile('test.model.session.xml', self.id())
        self.currencyFile = Datafile('test.model.currency.xml', self.id())
        self.importFileCsv = Datafile('test.model.import.csv', self.id())
        self.importFileTxt = Datafile('test.model.import.txt', self.id())

        self.dataset = Dataset(
                self.logger, './',
                self.sessionFile.getFilename(),
                self.itemFile.getFilename(),
                self.currencyFile.getFilename())
        self.dataset.restore()

        self.currency = Currency(
                self.logger,
                self.dataset,
                currencyCodes=['czk', 'eur'])
        self.model = Model(
                self.logger,
                self.dataset,
                self.currency)
Code Example #4
    def setUp(self):
        self.logger = logging.getLogger()

        self.itemFile = Datafile('test.model.items.xml', self.id())
        self.sessionFile = Datafile('test.model.session.xml', self.id())
        self.currencyFile = Datafile('test.model.currency.xml', self.id())

        self.dataset = Dataset(self.logger, './',
                               self.sessionFile.getFilename(),
                               self.itemFile.getFilename(),
                               self.currencyFile.getFilename())
Code Example #5
    def train(self):
        dirs = self.config.DATA_DIR
        live_dir = self.config.DATA_DIR_LIVE[0]
        while True:
            for dir in dirs:
                train_dirs = [d for d in dirs if d != dir]
                train_dirs.append(live_dir)
                train = Dataset(self.config, 'train', train_dirs, dir)
                epochs = int((self.config.MAX_EPOCH % len(dirs)) /
                             len(dirs)) + self.config.MAX_EPOCH
                self._train(train, self.last_epoch + epochs)
                self.last_epoch += epochs
Code Example #6
File: trainer.py Project: dennisl88/3DGCN
    def load_data(self, dataset=None, batch=128):
        self.data = Dataset(dataset=dataset, batch=batch)

        self.hyper["num_train"] = len(self.data.y["train"])
        self.hyper["num_val"] = len(self.data.y["valid"])
        self.hyper["num_test"] = len(self.data.y["test"])
        self.hyper["target_size"] = self.data.target_size
        self.hyper["molecule_size"] = self.data.molecule_size
        self.hyper["num_features"] = self.data.num_features
        self.hyper["task"] = self.data.task
        self.hyper["outputs"] = self.data.outputs
        self.hyper["batch"] = batch
        print("finish loading data with batch size", batch)
Code Example #7
File: test.py Project: yaojieliu/ECCV20-STDN
def main(argv=None): 
  # Configurations
  config = Config(gpu='1',
                  root_dir='./data/test/',
                  root_dir_val=None,
                  mode='testing')
  config.BATCH_SIZE = 1

  # Get images and labels.
  dataset_test = Dataset(config, 'test')

  # Train
  _M, _s, _b, _C, _T, _imname = _step(config, dataset_test, False)

  # Add ops to save and restore all the variables.
  saver = tf.train.Saver(max_to_keep=50,)
  with tf.Session(config=config.GPU_CONFIG) as sess:
    # Restore the model
    ckpt = tf.train.get_checkpoint_state(config.LOG_DIR)
    if ckpt and ckpt.model_checkpoint_path:
      saver.restore(sess, ckpt.model_checkpoint_path)
      last_epoch = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
      print('**********************************************************')
      print('Restore from Epoch '+str(last_epoch))
      print('**********************************************************')
    else:
      init = tf.initializers.global_variables()
      last_epoch = 0
      sess.run(init)
      print('**********************************************************')
      print('Train from scratch.')
      print('**********************************************************')

    step_per_epoch = int(len(dataset_test.name_list) / config.BATCH_SIZE)
    with open(config.LOG_DIR + '/test/score.txt', 'w') as f:
      for step in range(step_per_epoch):
        M, s, b, C, T, imname = sess.run([_M, _s, _b, _C, _T, _imname])
        # save the score
        for i in range(config.BATCH_SIZE):
            _name = imname[i].decode('UTF-8')
            _line = _name + ',' + str("{0:.3f}".format(M[i])) + ','\
                                + str("{0:.3f}".format(s[i])) + ','\
                                + str("{0:.3f}".format(b[i])) + ','\
                                + str("{0:.3f}".format(C[i])) + ','\
                                + str("{0:.3f}".format(T[i]))
            f.write(_line + '\n')  
            print(str(step+1)+'/'+str(step_per_epoch)+':'+_line, end='\r')  
    print("\n")
Code Example #8
def main(argv=None):
    # Configurations
    config = Config()
    config.DATA_DIR = ['/data/']
    config.LOG_DIR = './log/model'
    config.MODE = 'training'
    config.STEPS_PER_EPOCH_VAL = 180
    config.display()

    # Get images and labels.
    dataset_train = Dataset(config, 'train')
    # Build a Graph
    model = Model(config)

    # Train the model
    model.compile()
    model.train(dataset_train, None)
Code Example #9
    def setUp(self):
        self.logger = logging.getLogger()

        self.itemFile = Datafile('test.model.items.xml', self.id())
        self.sessionFile = Datafile('test.model.session.xml', self.id())
        self.currencyFile = Datafile('test.model.currency.xml', self.id())

        self.dataset = Dataset(
                self.logger, './',
                self.sessionFile.getFilename(),
                self.itemFile.getFilename(),
                self.currencyFile.getFilename())
        self.dataset.restore()

        self.currency = Currency(
                self.logger,
                self.dataset,
                currencyCodes=['czk', 'eur', 'usd'])
Code Example #10
    def test(self):
        dirs = self.config.DATA_DIR_TEST
        dataset = Dataset(self.config, 'test', dirs)
        for image, dmap, labels in dataset.feed:
            dmap_pred, cls_pred, route_value, leaf_node_mask = self.dtn(
                image, labels, False)
            # leaf counts
            spoof_counts = []
            for leaf in leaf_node_mask:
                spoof_count = tf.reduce_sum(leaf[:, 0]).numpy()
                spoof_counts.append(int(spoof_count))
            cls_total = tf.math.add_n(cls_pred) / len(cls_pred)
            index = 0
            for label in tf.unstack(labels):
                cls = cls_total[index].numpy()
                if cls < 0.8 or cls > 1.2:
                    logging.info("label: {}, cls: {}".format(
                        label.numpy(), cls))
                index += 1
Code Example #11
def main():
    global TRAIN_FROM_CHECKPOINT

    gpus = tf.config.experimental.list_physical_devices('GPU')
    print(f'GPUs {gpus}')
    if len(gpus) > 0:
        try:
            tf.config.experimental.set_memory_growth(gpus[0], True)
        except RuntimeError:
            pass

    if os.path.exists(TRAIN_LOGDIR): shutil.rmtree(TRAIN_LOGDIR)
    writer = tf.summary.create_file_writer(TRAIN_LOGDIR)

    trainset = Dataset('train')
    testset = Dataset('test')

    steps_per_epoch = len(trainset)
    global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
    warmup_steps = TRAIN_WARMUP_EPOCHS * steps_per_epoch
    total_steps = TRAIN_EPOCHS * steps_per_epoch

    if TRAIN_TRANSFER:
        Darknet = Create_Yolo(input_size=YOLO_INPUT_SIZE,
                              CLASSES=YOLO_COCO_CLASSES)
        load_yolo_weights(Darknet, Darknet_weights)  # use darknet weights

    yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE,
                       training=True,
                       CLASSES=TRAIN_CLASSES)
    if TRAIN_FROM_CHECKPOINT:
        try:
            yolo.load_weights(f"{TRAIN_CHECKPOINTS_FOLDER}/{TRAIN_MODEL_NAME}")
        except ValueError:
            print("Shapes are incompatible, transfering Darknet weights")
            TRAIN_FROM_CHECKPOINT = False

    if TRAIN_TRANSFER and not TRAIN_FROM_CHECKPOINT:
        for i, l in enumerate(Darknet.layers):
            layer_weights = l.get_weights()
            if layer_weights != []:
                try:
                    yolo.layers[i].set_weights(layer_weights)
                except:
                    print("skipping", yolo.layers[i].name)

    optimizer = tf.keras.optimizers.Adam()

    def train_step(image_data, target):
        with tf.GradientTape() as tape:
            pred_result = yolo(image_data, training=True)
            giou_loss = conf_loss = prob_loss = 0

            # optimizing process
            grid = 3 if not TRAIN_YOLO_TINY else 2
            for i in range(grid):
                conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                loss_items = compute_loss(pred,
                                          conv,
                                          *target[i],
                                          i,
                                          CLASSES=TRAIN_CLASSES)
                giou_loss += loss_items[0]
                conf_loss += loss_items[1]
                prob_loss += loss_items[2]

            total_loss = giou_loss + conf_loss + prob_loss

            gradients = tape.gradient(total_loss, yolo.trainable_variables)
            optimizer.apply_gradients(zip(gradients, yolo.trainable_variables))

            # update learning rate
            # about warmup: https://arxiv.org/pdf/1812.01187.pdf&usg=ALkJrhglKOPDjNt6SHGbphTHyMcT0cuMJg
            global_steps.assign_add(1)
            if global_steps < warmup_steps:  # and not TRAIN_TRANSFER:
                lr = global_steps / warmup_steps * TRAIN_LR_INIT
            else:
                lr = TRAIN_LR_END + 0.5 * (TRAIN_LR_INIT - TRAIN_LR_END) * (
                    (1 + tf.cos((global_steps - warmup_steps) /
                                (total_steps - warmup_steps) * np.pi)))
            optimizer.lr.assign(lr.numpy())

        return global_steps.numpy(), optimizer.lr.numpy(), giou_loss.numpy(
        ), conf_loss.numpy(), prob_loss.numpy(), total_loss.numpy()

    validate_writer = tf.summary.create_file_writer(TRAIN_LOGDIR)

    def validate_step(image_data, target):
        with tf.GradientTape() as tape:
            pred_result = yolo(image_data, training=False)
            giou_loss = conf_loss = prob_loss = 0

            # optimizing process
            grid = 3 if not TRAIN_YOLO_TINY else 2
            for i in range(grid):
                conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                loss_items = compute_loss(pred,
                                          conv,
                                          *target[i],
                                          i,
                                          CLASSES=TRAIN_CLASSES)
                giou_loss += loss_items[0]
                conf_loss += loss_items[1]
                prob_loss += loss_items[2]

            total_loss = giou_loss + conf_loss + prob_loss

        return giou_loss.numpy(), conf_loss.numpy(), prob_loss.numpy(
        ), total_loss.numpy()

    mAP_model = Create_Yolo(
        input_size=YOLO_INPUT_SIZE,
        CLASSES=TRAIN_CLASSES)  # create second model to measure mAP

    best_val_loss = 10000  # should be large at start

    for epoch in range(TRAIN_EPOCHS):
        count_train, giou_train, conf_train, prob_train, total_train, lr = 0., 0, 0, 0, 0, 0
        for image_data, target in trainset:
            results = train_step(image_data, target)
            cur_step = results[0] % steps_per_epoch
            count_train += 1
            lr += results[1]
            giou_train += results[2]
            conf_train += results[3]
            prob_train += results[4]
            total_train += results[5]
            print(
                "epoch:{:2.0f} step:{:5.0f}/{}, lr:{:.6f}, giou_loss:{:7.2f}, conf_loss:{:7.2f}, prob_loss:{:7.2f}, total_loss:{:7.2f}"
                .format(epoch, cur_step, steps_per_epoch, results[1],
                        results[2], results[3], results[4], results[5]))

        # writing summary data
        with writer.as_default():
            tf.summary.scalar("lr", lr / count_train, step=epoch)
            tf.summary.scalar("train loss/total_loss",
                              total_train / count_train,
                              step=epoch)
            tf.summary.scalar("train_loss/giou_loss",
                              giou_train / count_train,
                              step=epoch)
            tf.summary.scalar("train_loss/conf_loss",
                              conf_train / count_train,
                              step=epoch)
            tf.summary.scalar("train_loss/prob_loss",
                              prob_train / count_train,
                              step=epoch)
        writer.flush()

        if len(testset) == 0:
            print("configure TEST options to validate model")
            yolo.save_weights(
                os.path.join(TRAIN_CHECKPOINTS_FOLDER, TRAIN_MODEL_NAME))
            continue

        count_val, giou_val, conf_val, prob_val, total_val = 0., 0, 0, 0, 0
        for image_data, target in testset:
            results = validate_step(image_data, target)
            count_val += 1
            giou_val += results[0]
            conf_val += results[1]
            prob_val += results[2]
            total_val += results[3]

        # mAP = get_mAP(yolo, testset, score_threshold=TEST_SCORE_THRESHOLD, iou_threshold=TEST_IOU_THRESHOLD)

        # writing validate summary data
        with validate_writer.as_default():
            tf.summary.scalar("validate_loss/total_val",
                              total_val / count_val,
                              step=epoch)
            tf.summary.scalar("validate_loss/giou_val",
                              giou_val / count_val,
                              step=epoch)
            tf.summary.scalar("validate_loss/conf_val",
                              conf_val / count_val,
                              step=epoch)
            tf.summary.scalar("validate_loss/prob_val",
                              prob_val / count_val,
                              step=epoch)
        validate_writer.flush()

        print(
            "\n\ngiou_val_loss:{:7.2f}, conf_val_loss:{:7.2f}, prob_val_loss:{:7.2f}, total_val_loss:{:7.2f}\n\n"
            .format(giou_val / count_val, conf_val / count_val,
                    prob_val / count_val, total_val / count_val))

        if TRAIN_SAVE_CHECKPOINT and not TRAIN_SAVE_BEST_ONLY:
            save_directory = os.path.join(
                TRAIN_CHECKPOINTS_FOLDER, TRAIN_MODEL_NAME +
                "_val_loss_{:7.2f}".format(total_val / count))
            yolo.save_weights(save_directory)
        if TRAIN_SAVE_BEST_ONLY and best_val_loss > total_val / count_val:
            save_directory = os.path.join(TRAIN_CHECKPOINTS_FOLDER,
                                          TRAIN_MODEL_NAME)
            yolo.save(save_directory)
            best_val_loss = total_val / count_val
        if not TRAIN_SAVE_BEST_ONLY and not TRAIN_SAVE_CHECKPOINT:
            save_directory = os.path.join(TRAIN_CHECKPOINTS_FOLDER,
                                          TRAIN_MODEL_NAME)
            yolo.save_weights(save_directory)

    # measure mAP of trained custom model
    try:
        mAP_model.load_weights(save_directory +
                               '/variables/variables')  # use keras weights
        get_mAP(mAP_model,
                testset,
                score_threshold=TEST_SCORE_THRESHOLD,
                iou_threshold=TEST_IOU_THRESHOLD)
    except UnboundLocalError:
        print(
            "You don't have saved model weights to measure mAP, check TRAIN_SAVE_BEST_ONLY AND TRAIN SAVE_CHECKPOINT lines in configs.py"
        )
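
The learning-rate logic inside train_step above is a linear warmup followed by a cosine decay toward TRAIN_LR_END. Below is a minimal standalone sketch of that schedule, assuming the same quantities are passed in as plain numbers (yolo_lr_schedule is an illustrative name, not part of the project):

import numpy as np

def yolo_lr_schedule(global_step, warmup_steps, total_steps, lr_init, lr_end):
    # Linear ramp from 0 to lr_init during warmup, then cosine decay down to lr_end.
    if global_step < warmup_steps:
        return global_step / warmup_steps * lr_init
    progress = (global_step - warmup_steps) / (total_steps - warmup_steps)
    return lr_end + 0.5 * (lr_init - lr_end) * (1 + np.cos(progress * np.pi))

# Example: with warmup_steps=200, total_steps=2000, lr_init=1e-4 and lr_end=1e-6,
# the rate ramps up to 1e-4 at step 200 and decays to roughly 1e-6 by step 2000.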
Code Example #12
File: train.py Project: umitkacar/Kaggle-DeepFakes
    config.LOG_DIR = './log/model'
    config.MODE = 'training'
    config.STEPS_PER_EPOCH = 2000
    config.MAX_EPOCH = 1000
    config.LEARNING_RATE = 0.00001 #0.00005 #0.0001 #0.0005 #0.001
    config.BATCH_SIZE = 20
    # Validation
    config.DATA_DIR_VAL = ["/home/umit/xDataset/deepFake-dat/Train_Fake_Much_1",
                           "/home/umit/xDataset/deepFake-dat/Train_Live_Few_1"]
    config.STEPS_PER_EPOCH_VAL = 500
   
    config.display()

    # Get images and labels.
    dataset_train = Dataset(config,'train')
    #dataset_validation = Dataset(config,'validation')
    
    # Build a Graph
    model = Model(config)

    # # Train the model
    model.compile()
    model.train(dataset_train, val=None)
    
# Epoch 47-1000/1000: Map:0.116, Cls:0.0997, Route:1.78(0.108, 151.491), Uniq:nan, Counts:[1,1,0,0,6,2,0,5]     

#     Time taken for epoch 47 is 1144.3 sec
# Epoch 48-1000/1000: Map:0.116, Cls:0.0988, Route:1.78(0.091, 142.153), Uniq:nan, Counts:[2,1,0,1,4,1,2,8]     

#     Time taken for epoch 48 is 1123.81 sec
Code Example #13
from model.trainer import RRSSTrainer
from model.models import *
from model.dataset import Dataset
import tensorflow as tf
import os

os.environ['CUDA_VISIBLE_DEVICES'] = '1'

if __name__ == "__main__":
    tf.config.experimental.set_memory_growth(
        tf.config.experimental.list_physical_devices('GPU')[0], True)

    # Load data
    dataset = Dataset(5, normalize=False)

    # Load model
    model = InteractionNetCNC(units_embed=256,
                              units_conv=256,
                              units_fc=256,
                              pooling='sum',
                              dropout=0.5,
                              activation='relu',
                              target=1,
                              activation_out='linear',
                              regularizer=0.0025,
                              num_atoms=dataset.num_atoms,
                              num_features=dataset.num_features,
                              num_conv_layers_intra=1,
                              num_conv_layers_inter=1,
                              num_fc_layers=2)
Code Example #14
File: train.py Project: endingback/yolo
    def __init__(self):
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_name(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.max_bbox_per_scale = 150
        self.train_logdir = "./data/log/train"
        self.trainset = Dataset('train')
        self.testset = Dataset('test')
        self.per_epch_num = len(self.trainset)
        self.sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(allow_soft_placement=True))  # the GPU is assigned automatically

        with tf.name_scope('define_input'):
            self.input_data = tf.compat.v1.placeholder(dtype = tf.float32, name='input_data')
            self.label_sbbox = tf.compat.v1.placeholder(dtype= tf.float32, name = 'label_sbbox')
            self.label_mbbox = tf.compat.v1.placeholder(dtype = tf.float32, name = 'label_mbbox')
            self.label_lbbox = tf.compat.v1.placeholder(dtype = tf.float32, name= 'label_lbbox')
            self.true_mbbox = tf.compat.v1.placeholder(dtype = tf.float32, name = 'true_mbbox')
            self.true_sbbox = tf.compat.v1.placeholder(dtype = tf.float32, name = 'true_sbbox')
            self.true_lbbox = tf.compat.v1.placeholder(dtype = tf.float32, name = 'true_lbbox')
            self.trainable =tf.compat.v1.placeholder(dtype = tf.bool, name = 'training')

        with tf.name_scope('define_loss'):
            self.model = YOLOV3(self.input_data, self.trainable)
            self.net_var = tf.compat.v1.global_variables()
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_mbbox, self.label_lbbox, self.label_sbbox, self.true_lbbox, self.true_sbbox, self.true_mbbox
            )
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss

        with tf.name_scope('learn_rate'):
            self.global_step = tf.Variable(1.0, dtype = tf.float64, trainable = False, name = 'global_step')
            # number of warmup steps
            warmup_steps = tf.constant(self.warmup_periods * self.per_epch_num,
                                       dtype=tf.float64, name='warmup_steps')
            train_steps = tf.constant((self.first_stage_epochs + self.second_stage_epochs) * self.per_epch_num,
                                      dtype=tf.float64, name='train_steps')
            # how the learning rate changes during training
            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,  # warmup, then vary periodically between the max and min learning rates
                true_fn=lambda: self.global_step / warmup_steps * self.learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 * (self.learn_rate_init - self.learn_rate_end) *
                                 (1 + tf.cos(
                                     (self.global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi)
                                 )
            )
            global_setp_update = tf.compat.v1.assign_add(self.global_step, 1.0)

        with tf.name_scope("define_weight_decay"):
            moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.compat.v1.trainable_variables())

        with tf.name_scope("define_first_stage_train"):
            self.first_trainable_var_list = []
            for var in tf.compat.v1.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']:
                    self.first_trainable_var_list.append(var)
            first_stage_optimizer = tf.compat.v1.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
                                                                                               var_list = self.first_trainable_var_list)
            with tf.control_dependencies(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)):
                # group the ops that must finish before training continues: the optimizer update and the global-step increment
                with tf.control_dependencies([first_stage_optimizer, global_setp_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()

        with tf.name_scope("define_second_stage_train"):
            second_stage_trainable_var_list = tf.compat.v1.trainable_variables()
            second_stage_optimizer = tf.compat.v1.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
                                                                                                var_list = second_stage_trainable_var_list)

            with tf.control_dependencies(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)):
                # group the ops that must finish before training continues: the optimizer update and the global-step increment
                with tf.control_dependencies([second_stage_optimizer, global_setp_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('loader_and_sever'):
            self.loader = tf.compat.v1.train.Saver(self.net_var)  # saves the global variables
            self.saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables(), max_to_keep=10)

        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)

            logdir = "./data/log/"
            if os.path.exists(logdir): shutil.rmtree(logdir)  # recursively delete the folder contents
            os.makedirs(logdir)
            self.write_op = tf.compat.v1.summary.merge_all()  # write all summaries to disk so TensorBoard can display them
            self.summary_writer = tf.compat.v1.summary.FileWriter(logdir, graph=self.sess.graph)  # saves the graph structure
Code Example #15
def scatterplot_and_histogram(model, hyper, target='test'):
    tick = 2.0

    # Make folder
    fig_path = "../analysis/{}".format(model)
    if not os.path.isdir(fig_path):
        os.mkdir(fig_path)
    fig_path = "../analysis/{}/{}".format(model, hyper)
    if not os.path.isdir(fig_path):
        os.mkdir(fig_path)

    # Load results
    base_path = "../result/{}/{}/".format(model, hyper)
    for trial in range(20):
        path = base_path + 'trial_{:02d}/'.format(trial)

        # Load model
        custom_objects = {'NodeEmbedding': NodeEmbedding,
                          'GraphConvolution': GraphConvolution,
                          'Normalize': Normalize,
                          'GlobalPooling': GlobalPooling}
        model = load_model(path + 'best_model.h5', custom_objects=custom_objects)

        # Load data
        data = np.load(path + 'data_split.npz')
        dataset = Dataset('refined', 5)
        dataset.split_by_idx(32, data['train'], data['valid'], data['test'])
        data.close()

        # Predict
        if target == 'train':
            pred_y = model.predict(dataset.train, steps=dataset.train_step, verbose=0).flatten()
            true_y = dataset.train_y
            if len(pred_y) <= len(true_y):
                true_y = true_y[:len(pred_y)]
            else:
                pred_y = pred_y[:len(true_y)]
        elif target == 'valid':
            pred_y = model.predict(dataset.valid, steps=dataset.valid_step, verbose=0).flatten()
            true_y = dataset.valid_y
            if len(pred_y) <= len(true_y):
                true_y = true_y[:len(pred_y)]
            else:
                pred_y = pred_y[:len(true_y)]
        else:
            pred_y = model.predict(dataset.test, steps=dataset.test_step, verbose=0).flatten()
            true_y = dataset.test_y
            if len(pred_y) <= len(true_y):
                true_y = true_y[:len(pred_y)]
            else:
                pred_y = pred_y[:len(true_y)]

        diff_y = true_y - pred_y

        # Draw figure
        fig, axes = plt.subplots(1, 2)
        fig.set_size_inches(5, 2)

        # Scatterplot and trend line
        axes[0].scatter(true_y, pred_y, c='#000000ff', s=2, linewidth=1)
        axes[0].set_aspect('equal', 'box')
        x_min, x_max, y_min, y_max = axes[0].axis()
        axes[0].set_xlim(min(x_min, y_min), max(x_max, y_max))
        axes[0].set_ylim(min(x_min, y_min), max(x_max, y_max))
        axes[0].set_xticks(np.arange(int(min(x_min, y_min)), max(x_max, y_max), tick))
        axes[0].set_yticks(np.arange(int(min(x_min, y_min)), max(x_max, y_max), tick))
        axes[0].spines['right'].set_visible(False)
        axes[0].spines['top'].set_visible(False)

        x_min, x_max, y_min, y_max = axes[0].axis()
        trend_z = np.polyfit(true_y, pred_y, 1)
        trend_p = np.poly1d(trend_z)
        axes[0].plot([x_min, x_max], [trend_p(x_min), trend_p(x_max)], color='black', alpha=1, linestyle="-")

        # Histogram
        bins = np.linspace(np.floor(np.min(diff_y)), np.ceil(np.max(diff_y)), 25)
        n, x, _ = axes[1].hist(diff_y, bins=bins, histtype='bar', density=True, color='orange')
        density = gaussian_kde(diff_y)
        axes[1].plot(x, density(x), linestyle='-', color="black")

        x_min, x_max, y_min, y_max = axes[1].axis()
        x_limit = np.ceil(max(np.absolute(x_min), x_max))
        axes[1].set_xlim(-x_limit, x_limit)
        axes[1].set_ylim(0, 0.5)
        axes[1].set_xticks(np.arange(-x_limit, x_limit + 0.01, tick))
        asp = np.diff(axes[1].get_xlim())[0] / np.diff(axes[1].get_ylim())[0]
        axes[1].set_aspect(asp)
        axes[1].spines['right'].set_visible(False)
        axes[1].spines['top'].set_visible(False)

        # Save analysis
        fig_name = fig_path + '/{}_histogram_{}.png'.format(trial, target)
        plt.savefig(fig_name, dpi=600)
        print('Histogram saved on {}'.format(fig_name))
        plt.clf()
Code Example #16
import json

from model.dataset import Dataset
from model.models import BasicModel

model = BasicModel.compile(640, 480)

with open("manifest.json", "rb") as infile:
    manifest = json.loads(infile.read())
dataset = Dataset(manifest, "images/normalized")

train_img, train_label, test_img, test_label = dataset.load_data()
model.fit(train_img, train_label, batch_size=5, epochs=25, verbose=1)
loss, acc = model.evaluate(test_img, test_label, verbose=1)
print(loss, acc)
# model.save("out.m5")
Code Example #17
File: lrp.py Project: danny305/InteractionNet
def perform_lrp(model, hyper, trial=0, sample=None, epsilon=0.1, gamma=0.1):
    tf.config.experimental.set_memory_growth(
        tf.config.experimental.list_physical_devices('GPU')[0], True)

    # Make folder
    fig_path = "../analysis/{}".format(model)
    if not os.path.isdir(fig_path):
        os.mkdir(fig_path)
    fig_path = "../analysis/{}/{}".format(model, hyper)
    if not os.path.isdir(fig_path):
        os.mkdir(fig_path)
    fig_path = "../analysis/{}/{}/heatmap".format(model, hyper)
    if not os.path.isdir(fig_path):
        os.mkdir(fig_path)

    # Load results
    base_path = "../result/{}/{}/".format(model, hyper)
    path = base_path + 'trial_{:02d}/'.format(trial)

    # Load hyper
    with open(path + 'hyper.csv', newline='') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            hyper = dict(row)

    # Load model
    custom_objects = {
        'NodeEmbedding': NodeEmbedding,
        'GraphConvolution': GraphConvolution,
        'Normalize': Normalize,
        'GlobalPooling': GlobalPooling
    }
    model = load_model(path + 'best_model.h5', custom_objects=custom_objects)
    print([l.name for l in model.layers])

    # Load data
    data = np.load(path + 'data_split.npz')
    dataset = Dataset('refined', 5)
    if sample is not None:
        dataset.split_by_idx(32, data['train'], data['valid'],
                             data['test'][sample])
    else:
        dataset.split_by_idx(32, data['train'], data['valid'], data['test'])
    data.close()

    # Predict
    true_y = dataset.test_y
    outputs = {}
    for layer_name in [
            'node_embedding', 'node_embedding_1', 'normalize', 'normalize_1',
            'activation', 'add', 'activation_1', 'add_1', 'global_pooling',
            'activation_2', 'activation_3', 'activation_4',
            'atom_feature_input'
    ]:
        sub_model = tf.keras.models.Model(
            inputs=model.input, outputs=model.get_layer(layer_name).output)
        outputs[layer_name] = sub_model.predict(dataset.test,
                                                steps=dataset.test_step,
                                                verbose=0)[:len(true_y)]

    # Output layer: LRP-0
    # print('Calculating Dense_2...')
    relevance = lrp_dense(outputs['activation_3'],
                          outputs['activation_4'],
                          model.get_layer('dense_2').get_weights()[0],
                          model.get_layer('dense_2').get_weights()[1],
                          epsilon=0)

    # Dense layer: LRP-e
    # print('Calculating Dense_1...')
    relevance = lrp_dense(outputs['activation_2'],
                          relevance,
                          model.get_layer('dense_1').get_weights()[0],
                          model.get_layer('dense_1').get_weights()[1],
                          epsilon=epsilon)

    # Dense layer: LRP-e
    # print('Calculating Dense_0...')
    relevance = lrp_dense(outputs['global_pooling'],
                          relevance,
                          model.get_layer('dense').get_weights()[0],
                          model.get_layer('dense').get_weights()[1],
                          epsilon=epsilon)

    # Pooling layer
    # print('Calculating Pooling...')
    relevance = lrp_pooling(outputs['activation_1'], relevance)

    # Add layer
    # print('Calculating Add_1...')
    relevance_1, relevance_2 = lrp_add(
        [outputs['add'], outputs['activation_1']], relevance)

    # GCN layer: LRP-g
    # print('Calculating GCN_1...')
    relevance = lrp_gcn_gamma(
        outputs['add'],
        relevance_2,
        outputs['normalize_1'],
        model.get_layer('graph_convolution_1').get_weights()[0],
        gamma=gamma) + relevance_1

    # Add layer
    # print('Calculating Add_0...')
    relevance_1, relevance_2 = lrp_add(
        [outputs['graph_embedding_1'], outputs['activation']], relevance)

    # GCN layer: LRP-g
    # print('Calculating GCN_0...')
    relevance = lrp_gcn_gamma(
        outputs['graph_embedding_1'],
        relevance_2,
        outputs['normalize'],
        model.get_layer('graph_convolution').get_weights()[0],
        gamma=gamma) + relevance_1

    # Embedding layer : LRP-e
    # print('Calculating Embedding_1...')
    relevance = lrp_dense(
        outputs['graph_embedding'],
        relevance,
        model.get_layer('graph_embedding_1').get_weights()[0],
        model.get_layer('graph_embedding_1').get_weights()[1],
        epsilon=epsilon)

    # Embedding layer : LRP-e
    # print('Calculating Embedding_0...')
    relevance = lrp_dense(outputs['atom_feature_input'],
                          relevance,
                          model.get_layer('graph_embedding').get_weights()[0],
                          model.get_layer('graph_embedding').get_weights()[1],
                          epsilon=epsilon)

    relevance = tf.math.reduce_sum(relevance, axis=-1).numpy()
    relevance = np.divide(relevance, np.expand_dims(true_y, -1))

    # Preset
    DrawingOptions.bondLineWidth = 1.5
    DrawingOptions.elemDict = {}
    DrawingOptions.dotsPerAngstrom = 20
    DrawingOptions.atomLabelFontSize = 4
    DrawingOptions.atomLabelMinFontSize = 4
    DrawingOptions.dblBondOffset = 0.3
    DrawingOptions.wedgeDashedBonds = False

    # Load data
    dataframe = pd.read_pickle('../data/5A.pkl')
    if sample is not None:
        test_set = np.load(path + 'data_split.npz')['test'][sample]
    else:
        test_set = np.load(path + 'data_split.npz')['test']

    # Draw images for test molecules
    colormap = cm.get_cmap('seismic')
    for idx, test_idx in enumerate(test_set):
        print('Drawing figure for {}/{}'.format(idx, len(test_set)))
        pdb_code = dataframe.iloc[test_idx]['code']
        error = np.absolute(dataframe.iloc[test_idx]['output'] -
                            outputs['activation_4'][idx])[0]
        if error > 0.2: continue

        for mol_ligand, mol_pocket in zip(
                Chem.SDMolSupplier(
                    '../data/refined-set/{}/{}_ligand.sdf'.format(
                        pdb_code, pdb_code)),
                Chem.SDMolSupplier(
                    '../data/refined-set/{}/{}_pocket.sdf'.format(
                        pdb_code, pdb_code))):

            # Crop atoms
            mol = Chem.CombineMols(mol_ligand, mol_pocket)
            distance = np.array(rdmolops.Get3DDistanceMatrix(mol))
            cropped_idx = np.argwhere(
                np.min(distance[:, :mol_ligand.GetNumAtoms()], axis=1) <= 5
            ).flatten()
            unpadded_relevance = np.zeros((mol.GetNumAtoms(), ))
            np.put(unpadded_relevance, cropped_idx, relevance[idx])
            scale = max(max(unpadded_relevance),
                        math.fabs(min(unpadded_relevance))) * 3

            # Separate fragments in Combined Mol
            idxs_frag = rdmolops.GetMolFrags(mol)
            mols_frag = rdmolops.GetMolFrags(mol, asMols=True)

            # Draw fragment and interaction
            for i, (mol_frag,
                    idx_frag) in enumerate(zip(mols_frag[1:], idxs_frag[1:])):
                # Ignore water
                if mol_frag.GetNumAtoms() == 1:
                    continue

                # Generate 2D image
                mol_combined = Chem.CombineMols(mols_frag[0], mol_frag)
                AllChem.Compute2DCoords(mol_combined)
                fig = Draw.MolToMPL(mol_combined, coordScale=1)
                fig.axes[0].set_axis_off()

                # Draw line between close atoms (5A)
                flag = False
                for j in range(mol_ligand.GetNumAtoms()):
                    for k in idx_frag:
                        if distance[j, k] <= 5:
                            # Draw connection
                            coord_li = mol_combined._atomPs[j]
                            coord_po = mol_combined._atomPs[
                                idx_frag.index(k) + mols_frag[0].GetNumAtoms()]
                            x, y = np.array([[coord_li[0], coord_po[0]],
                                             [coord_li[1], coord_po[1]]])
                            line = Line2D(x,
                                          y,
                                          color='b',
                                          linewidth=1,
                                          alpha=0.3)
                            fig.axes[0].add_line(line)
                            flag = True

                # Draw heatmap for atoms
                for j in range(mol_combined.GetNumAtoms()):
                    relevance_li = unpadded_relevance[j]
                    relevance_li = relevance_li / scale + 0.5
                    highlight = plt.Circle(
                        (mol_combined._atomPs[j][0],
                         mol_combined._atomPs[j][1]),
                        0.035 * math.fabs(unpadded_relevance[j] / scale) +
                        0.008,
                        color=colormap(relevance_li),
                        alpha=0.8,
                        zorder=0)
                    fig.axes[0].add_artist(highlight)

                # Save
                if flag:
                    fig_name = fig_path + '/{}_lrp_{}_{}_{}.png'.format(
                        trial, test_idx, pdb_code, i)
                    fig.savefig(fig_name, bbox_inches='tight')
                plt.close(fig)
Code Example #18
                    help="Directory containing the dataset")
parser.add_argument(
    '--restore_from',
    default=None,
    help=
    "Optional, directory or file containing weights to reload before training")

if __name__ == '__main__':
    # Get arguments
    args = parser.parse_args()

    # Set the random seed for the whole graph
    tf.set_random_seed(100)

    # Set the logger
    set_logger(os.path.join(args.model_dir, 'train.log'))

    # Load the parameters
    params = Params(os.path.join(args.model_dir, 'params.json'))

    # Initialize the dataset for training
    dataset = Dataset(args.data_dir, params)

    # Create the model
    model = Model(dataset, params)

    # Train the model
    logging.info("Starting training for {} epoch(s)".format(params.num_epochs))
    train_and_evaluate(model.train_spec, model.eval_spec, args.model_dir,
                       params, args.restore_from)
Code Example #19
def main(argv=None):
    # Configurations
    config = Config(gpu='1',
                    root_dir='./data/train/',
                    root_dir_val='./data/val/',
                    mode='training')

    # Create data feeding pipeline.
    dataset_train = Dataset(config, 'train')
    dataset_val = Dataset(config, 'val')

    # Train Graph
    losses, g_op, d_op, fig = _step(config, dataset_train, training_nn=True)
    losses_val, _, _, fig_val = _step(config, dataset_val, training_nn=False)

    # Add ops to save and restore all the variables.
    saver = tf.train.Saver(max_to_keep=50, )
    with tf.Session(config=config.GPU_CONFIG) as sess:
        # Restore the model
        ckpt = tf.train.get_checkpoint_state(config.LOG_DIR)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            last_epoch = ckpt.model_checkpoint_path.split('/')[-1].split(
                '-')[-1]
            print('**********************************************************')
            print('Restore from Epoch ' + str(last_epoch))
            print('**********************************************************')
        else:
            init = tf.initializers.global_variables()
            last_epoch = 0
            sess.run(init)
            print('**********************************************************')
            print('Train from scratch.')
            print('**********************************************************')

        avg_loss = Error()
        print_list = {}
        for epoch in range(int(last_epoch), config.MAX_EPOCH):
            start = time.time()
            # Train one epoch
            for step in range(config.STEPS_PER_EPOCH):
                if step % config.G_D_RATIO == 0:
                    _losses = sess.run(losses + [g_op, d_op, fig])
                else:
                    _losses = sess.run(losses + [g_op, fig])

                # Logging
                print_list['g_loss'] = _losses[0]
                print_list['d_loss'] = _losses[1]
                print_list['a_loss'] = _losses[2]
                display_list = ['Epoch '+str(epoch+1)+'-'+str(step+1)+'/'+ str(config.STEPS_PER_EPOCH)+':'] +\
                               [avg_loss(x) for x in print_list.items()]
                print(*display_list + ['          '], end='\r')
                # Visualization
                if step % config.LOG_FR_TRAIN == 0:
                    fname = config.LOG_DIR + '/Epoch-' + str(
                        epoch + 1) + '-' + str(step + 1) + '.png'
                    cv2.imwrite(fname, _losses[-1])

            # Model saving
            saver.save(sess, config.LOG_DIR + '/ckpt', global_step=epoch + 1)
            print('\n', end='\r')

            # Validate one epoch
            for step in range(config.STEPS_PER_EPOCH_VAL):
                _losses = sess.run(losses_val + [fig_val])

                # Logging
                print_list['g_loss'] = _losses[0]
                print_list['d_loss'] = _losses[1]
                print_list['a_loss'] = _losses[2]
                display_list = ['Epoch '+str(epoch+1)+'-Val-'+str(step+1)+'/'+ str(config.STEPS_PER_EPOCH_VAL)+':'] +\
                               [avg_loss(x, val=1) for x in print_list.items()]
                print(*display_list + ['          '], end='\r')
                # Visualization
                if step % config.LOG_FR_TEST == 0:
                    fname = config.LOG_DIR + '/Epoch-' + str(
                        epoch + 1) + '-Val-' + str(step + 1) + '.png'
                    cv2.imwrite(fname, _losses[-1])

            # time of one epoch
            print('\n    Time taken for epoch {} is {:3g} sec'.format(
                epoch + 1,
                time.time() - start))
            avg_loss.reset()
Code Example #20
File: main.py Project: sbrandtb/artshow-keeper
# Configure flask
app = flask.Flask('Artshow')
app.root_path = ROOT_PATH
app.register_blueprint(items_controller.blueprint,
                       url_prefix=items_controller.URL_PREFIX)
app.register_blueprint(auction_controller.blueprint,
                       url_prefix=auction_controller.URL_PREFIX)
app.register_blueprint(reconciliation_controller.blueprint,
                       url_prefix=reconciliation_controller.URL_PREFIX)
app.register_blueprint(settings_controller.blueprint,
                       url_prefix=settings_controller.URL_PREFIX)
app.secret_key = config.SESSION_KEY

# Initialize application
dataset = Dataset(logging.getLogger('dataset'), config.DATA_FOLDER)
dataset.restore()
currency = Currency(logging.getLogger('currency'),
                    dataset,
                    currencyCodes=config.CURRENCY)
model = Model(logging.getLogger('model'), dataset, currency)
dictionaryPath = os.path.join(os.path.dirname(__file__), 'locale')
for language in config.LANGUAGES:
    registerDictionary(
        language,
        PhraseDictionary(
            logging.getLogger('dictionary'),
            os.path.join(dictionaryPath,
                         'translation.{0}.xml'.format(language))))
del dictionaryPath
Code Example #21
def train_on_data_ui(model, model_path):
    save_path = model_path
    sg.theme(cfg.UI.THEME)

    layout = [[
        sg.Text("Epoch:"),
        sg.Spin(values=[x for x in range(100)],
                initial_value=5,
                key="-EPOCH-",
                size=(10, 1)),
        sg.Text("Learning Rate:"),
        sg.Input("1e-5", key="-LR-", size=(10, 1)),
        sg.Text("Batch Size"),
        sg.Spin(values=[x for x in range(64)],
                initial_value=2,
                key="-BS-",
                size=(10, 1))
    ],
              [
                  sg.Text("Data annotation file:"),
                  sg.Input(size=(70, 1), key="-PATH-"),
                  sg.FileBrowse(file_types=(("Annotation File",
                                             "annotation.txt"), ("ALL Files",
                                                                 "*.*")),
                                initial_folder="./",
                                size=(9, 1),
                                key="-BROWSE-")
              ], [sg.ProgressBar(100, size=(80, 30), key="-PB-")],
              [
                  sg.Multiline("--Waiting for training task--\n",
                               autoscroll=True,
                               disabled=True,
                               size=(100, 20),
                               key="-OUT-")
              ],
              [
                  sg.B("Start Train", key="-START-", size=(15, 1)),
                  sg.B("Save Model", key="-SAVE-", size=(15, 1),
                       disabled=True),
                  sg.B("Save Model As..", key="-SAVE-AS-", size=(15, 1)),
                  sg.B("Exit", size=(15, 1))
              ]]

    window = Window("Train on Data", layout=layout, font=(cfg.UI.FONT, 12))
    message_queue = Queue()
    train_thread = None
    global amended
    save_thread = None
    dataset = None
    trainer = None

    while True:
        event, value = window.read(timeout=100)
        if event in [None, "Exit"]:
            break

        if event in ["-START-"]:
            epoch = int(value["-EPOCH-"])
            data_path = value["-PATH-"]
            batch_size = int(value["-BS-"])
            lr = float(value["-LR-"])
            try:
                dataset = Dataset(data_path, batch_size)
            except Exception as e:
                sg.popup_error(e)
                continue
            trainer = ModelTrainer(model, dataset, lr, epoch, message_queue)
            train_thread = threading.Thread(target=train_task,
                                            args=(trainer, message_queue),
                                            daemon=True)
            amended = True
            train_thread.start()
            window["-START-"].update(disabled=True)
            window["-SAVE-"].update(disabled=True)
            window["-SAVE-AS-"].update(disabled=True)
            window["-EPOCH-"].update(disabled=True)
            window["-LR-"].update(disabled=True)
            window["-BS-"].update(disabled=True)
            window["-PATH-"].update(disabled=True)
            window["-BROWSE-"].update(disabled=True)

        if event in ["-SAVE-"]:
            if amended:
                save_thread = threading.Thread(target=model.save,
                                               args=(save_path, ))
                save_thread.start()

        if event in ["-SAVE-AS-"]:
            save_as_path = sg.popup_get_folder("Select folder to save model",
                                               "Save As ..",
                                               initial_folder="./")
            if save_as_path is not None:
                save_thread = threading.Thread(target=model.save,
                                               args=(save_as_path, ))
                save_thread.start()
                save_path = save_as_path

        if train_thread is not None:

            while True:
                try:
                    progress, message = message_queue.get_nowait()
                except queue.Empty:
                    break
                if message.startswith("ERROR:"):
                    ml_print_line(window["-OUT-"], message, color="red")
                else:
                    ml_print_line(window["-OUT-"], message)
                if progress is not None:
                    window["-PB-"].UpdateBar(progress)

            if not train_thread.is_alive():
                ml_print_line(window["-OUT-"], "Task Over", color="green")
                train_thread = None
                window["-START-"].update(disabled=False)
                window["-SAVE-"].update(disabled=False)
                window["-SAVE-AS-"].update(disabled=False)
                window["-EPOCH-"].update(disabled=False)
                window["-LR-"].update(disabled=False)
                window["-BS-"].update(disabled=False)
                window["-PATH-"].update(disabled=False)
                window["-BROWSE-"].update(disabled=False)
                dataset = None
                trainer = None

        if save_thread is not None:
            sg.popup_animated(sg.DEFAULT_BASE64_LOADING_GIF,
                              'Saving',
                              time_between_frames=10)
            window.disable()
            if not save_thread.is_alive():
                sg.popup_animated(None)
                sg.popup_ok("Model saved")
                window.enable()
                window.bring_to_front()
                save_thread = None
                amended = False

    if trainer is not None:
        trainer.terminate()
    window.close()
    return model, save_path, amended
Code Example #22
def test_dataset(path,
                 dataset=1,
                 scenario=1,
                 n_runs=3,
                 n_subsets=3,
                 k=3,
                 c=1,
                 pca=0):
    # Variables
    data = Dataset()
    runs_performance = {}

    # File Variables
    wb = load_workbook(path)
    ws = wb.active
    row = ws.max_row + 1
    ws.title = 'Test Results'

    # Select test data
    data.choose_data(dataset)
    # print(data.database_selected_str, "data loaded.")

    # Pre-process data
    data.scenario_pre_processing(scenario)
    # print("Finished pre-processing data for", data.scenario_selected_str)

    # Apply Kruskal-Wallis
    data.set_feature_selection_method(2)
    data.dataset = kruskal_wallis(data.dataset, len(data.dataset['label']))
    # print("Finished applying kruskal-wallis feature selection method.")

    # Apply Correlation redundancy measure
    data.dataset, unused_label = redundancy_measure(data.dataset)
    # print("Correlation rendundancy measure applied.")
    # print("Begining tests...this might take a while")

    if pca == 1:
        data.dataset = run_pca(data.dataset, len(data.dataset['label']))

    # For all 5 classifiers
    for classifier in range(1, 6):
        # Variable to hold all runs for all classifiers
        runs_performance[classifier] = {}

        if classifier == 5:
            n_runs = int(n_runs / 5)
            n_subsets = 3

        # Run "n_runs" tests
        for run in range(0, n_runs):
            # Structure to hold results of classification
            performance = {
                'fp': 0,
                'fn': 0,
                'tp': 0,
                'tn': 0,
                'accuracy': 0,
                'avg_misclassification': 0,
                'misclassification_per_fold': [],
                'avg_misclassification_per_fold': [],
                'sensitivity': 0,
                'specificity': 0
            }

            print("run %s for classifier %s" % (str(run), str(classifier)))
            # Create dict to save run results
            runs_performance[classifier][run] = {}

            # Apply K-fold: splitting the dataset
            kf = KFold(n_splits=n_subsets, shuffle=True)

            # K-fold Executions
            for idx_train, idx_test in kf.split(data.dataset["data"],
                                                data.dataset["target"]):
                # Classification prediction
                prediction = []

                # Prepare data for training
                x_train = [data.dataset["data"][idx] for idx in idx_train]
                x_train = np.asarray(x_train).astype(np.float64)
                y_train = [data.dataset["target"][idx] for idx in idx_train]

                # Prepare data for testing
                x_test = [data.dataset["data"][idx] for idx in idx_test]
                x_test = np.asarray(x_test).astype(np.float64)
                y_test = [data.dataset["target"][idx] for idx in idx_test]

                # Minimum distance classifier (MDC)
                if classifier == 1:
                    prediction = minimum_distance_classifier(
                        x_train, y_train, x_test, y_test)

                # Fisher Discriminant Analysis (Fisher LDA)
                elif classifier == 2:
                    prediction = fisher_discriminant_analisys(
                        x_train, y_train, x_test, y_test)

                # K-Nearest Neighbors (KNN)
                elif classifier == 3:
                    prediction = k_nearest_neighbors(x_train, y_train, x_test,
                                                     y_test, k)

                # Bayes Classifier
                elif classifier == 4:
                    prediction = bayes_classifier(x_train, y_train, x_test,
                                                  y_test)

                # Support Vector Machines
                elif classifier == 5:
                    prediction = support_vector_machines(
                        x_train, y_train, x_test, y_test, c)

                # Performance measurement
                performance = performance_measurement(y_test, prediction,
                                                      scenario, performance)

            # Calculate averages
            performance['avg_misclassification'] /= n_subsets
            performance['sensitivity'] /= n_subsets
            performance['specificity'] /= n_subsets
            performance['accuracy'] /= n_subsets

            # Set Layout
            set_layout(ws, scenario)

            # Add values into the sheet
            ws.cell(column=1, row=row, value=dataset)
            ws.cell(column=2, row=row, value=run)
            ws.cell(column=3, row=row, value=classifier)
            set_values(ws, scenario, performance, row)
            row += 1

            # Save performance measurement per run
            runs_performance[classifier][run]["performance"] = performance
            runs_performance[classifier][run]["scenario"] = scenario

    # For debug
    # for classifier in runs_performance:
    #     for run in runs_performance[classifier]:
    #         print("Classifier ", classifier, " run", run)
    #         print(runs_performance[classifier][run])

    return wb
Code Example #23
File: coco.py Project: agillbraun/ImageRepository
  Label(('food', 'dessert', 'donut')),
  Label(('food', 'dessert', 'cake')),
  Label(('object', 'furniture', 'chair')),
  Label(('object', 'furniture', 'couch')),
  Label(('object', 'furniture', 'decoration', 'potted plant')),
  Label(('object', 'furniture', 'bed'), related=('bedroom',)),
  Label(('object', 'furniture', 'dining table')),
  Label(('object', 'fixture', 'toilet'), related=('bathroom',)),
  Label(('object', 'technology', 'television'), alt=('tv',)),
  Label(('object', 'technology', 'laptop')),
  Label(('object', 'technology', 'mouse')),
  Label(('object', 'technology', 'remote')),
  Label(('object', 'technology', 'keyboard')),
  Label(('object', 'technology', 'cell phone'), alt=('phone', 'mobile phone')),
  Label(('object', 'appliance', 'microwave')),
  Label(('object', 'appliance', 'oven')),
  Label(('object', 'appliance', 'toaster')),
  Label(('object', 'fixture', 'sink')),
  Label(('object', 'appliance', 'refrigerator')),
  Label(('object', 'book')),
  Label(('object', 'technology', 'clock')),
  Label(('object', 'furniture', 'decoration', 'vase')),
  Label(('object', 'tool', 'scissors')),
  Label(('object', 'teddy bear')),
  Label(('object', 'appliance', 'hair drier')),
  Label(('object', 'toothbrush')),
]

Coco = Dataset('COCO 2017')
Coco.register(coco_labels)