Example #1
    def __init__(self, options):
        """Prepare the network, criterion, solver, and data.

        Args:
            options (dict): Hyperparameters.
        """
        print('Prepare the network and data.')
        self._options = options
        # Network.
        self._net = torch.nn.DataParallel(BCNN()).cuda()
        # Load the model from disk.
        #self._net.load_state_dict(torch.load(self._path['model']))
        print(self._net)
        # Criterion.
        self._criterion = torch.nn.CrossEntropyLoss().cuda()
        # Solver.
        self._solver = torch.optim.SGD(
            self._net.parameters(), lr=self._options['base_lr'],
            momentum=0.9, weight_decay=self._options['weight_decay'])
        self._scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self._solver, mode='max', factor=0.1, patience=3, verbose=True,
            threshold=1e-4)

        self._train_path = os.path.join(self._options['text_path'], 'train.txt')
        self._test_path = os.path.join(self._options['text_path'], 'test.txt')

        # Dataloader.
        transform = T.Compose([
            T.Resize(448),
            T.CenterCrop(448),
            T.ToTensor(),
            T.Normalize(mean=(0.485, 0.456, 0.406),
                        std=(0.229, 0.224, 0.225))
        ])

        train_data = Data(train_path=self._train_path,
                          aug_path=options['aug_data'], img_transform=transform)
        test_data = Data(train_path=self._test_path,
                         aug_path=options['aug_data'], img_transform=transform)

        self._train_loader = torch.utils.data.DataLoader(
            dataset=train_data, batch_size=self._options['batch_size'],
            drop_last=True, pin_memory=True, shuffle=True, num_workers=4)
        self._test_loader = torch.utils.data.DataLoader(
            dataset=test_data, batch_size=self._options['batch_size'],
            pin_memory=True, shuffle=False, num_workers=4)
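A hypothetical construction of this manager; the class name `Manager` and every option value below are illustrative assumptions, not taken from the example:

options = {
    'base_lr': 1.0,
    'weight_decay': 1e-8,
    'batch_size': 64,
    'text_path': './data/lists',  # directory holding train.txt and test.txt
    'aug_data': './data/aug',     # forwarded to the Data dataset as aug_path
}
manager = Manager(options)        # prints the network and builds both loaders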
Example #2
def main():
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    time_list = []
    for i in range(5):  # 5 runs for averaging
        args.id = str(i)  # run id
        tf.reset_default_graph()
        with tf.Session(config=config) as sess:
            args.result = os.path.join(args.result, args.id)
            args.log = os.path.join(args.log, args.id)
            args.model = os.path.join(args.model, args.id)
            args.tfrecords = os.path.join(args.tfrecords, args.id)
            if not os.path.exists(args.model):
                os.mkdir(args.model)
            if not os.path.exists(args.log):
                os.mkdir(args.log)
            if not os.path.exists(args.result):
                os.mkdir(args.result)
            if not os.path.exists(args.tfrecords):
                os.mkdir(args.tfrecords)

            data_model = Data(args)
            data_model.read_data()
            dataset_train = data_model.data_parse(
                os.path.join(args.tfrecords, 'train_data.tfrecords'), type='train')
            dataset_test = data_model.data_parse(
                os.path.join(args.tfrecords, 'test_data.tfrecords'), type='test')
            dataset_sae_train = data_model.data_parse(
                os.path.join(args.tfrecords, 'sae_train_data_' + str(args.sae_ratio) + '.tfrecords'), type='sae_train')
            dataset_sae_test = data_model.data_parse(
                os.path.join(args.tfrecords, 'sae_test_data.tfrecords'), type='sae_test')
            dataset_de_map = data_model.data_parse(
                os.path.join(args.tfrecords, 'demap_data.tfrecords'), type='demap')

            model = Model(args, sess)
            if args.load_model:
                model.load(args.model)
            else:
                model.train(dataset_train, dataset_sae_train, data_model)
            a = datetime.now()
            model.test(dataset_test)
            b = datetime.now()
            time_list.append((b-a).total_seconds())
            if args.get_decode_map:
                model.save_decode_map(dataset_de_map)
            if args.get_decode_image:
                model.get_decode_image(dataset_sae_test)
            if args.get_feature:
                model.get_feature(dataset_sae_test)
            args.result = 'result'
            args.log = 'log'
            args.tfrecords = 'tfrecords'
            args.model = 'model'
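The four exists/mkdir pairs repeated in these examples can be collapsed into one helper; a sketch assuming Python 3.2+ (where os.makedirs accepts exist_ok):

import os

def ensure_dirs(*paths):
    # Create every run directory, skipping those that already exist.
    for p in paths:
        os.makedirs(p, exist_ok=True)

# ensure_dirs(args.model, args.log, args.result, args.tfrecords)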
Example #3
    def __getitem__(self, idx):
        sample = create_empty_data()._asdict()
        if self.opt.fake_test:
            for i in range(len(self.opt.prefix)):
                sample['prefix'].append(
                    self.move(self.base_img,
                              len(self.opt.prefix) - i + self.offset))

            sample['unstable'].append(
                self.move(self.base_img, random.uniform(-5, 5)))
            sample['target'].append(
                self.move(self.unstable_frames[0], self.offset))
        else:
            for i in range(len(self.opt.prefix)):
                j = self.opt.prefix[len(self.opt.prefix) - 1 - i]
                if j > len(self.pred_frames):
                    sample['prefix'].append(self.unstable_frames[0])
                else:
                    sample['prefix'].append(
                        self.pred_frames[len(self.pred_frames) - j])
            sample['unstable'].append(
                self.unstable_frames[len(self.pred_frames)])
            sample['target'].append(self.stable_frames[len(self.pred_frames)])
        sample = get_transform(self.opt, isTrain=self.opt.isTrain)(sample)
        sample = Data(**sample)
        return sample
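For reference, `create_empty_data()._asdict()` followed by `Data(**sample)` round-trips a namedtuple through a mutable dict; a minimal sketch of how `Data` and `create_empty_data` could be defined (assumed, not taken from the example's source):

from collections import namedtuple

Data = namedtuple('Data', ['prefix', 'unstable', 'target'])

def create_empty_data():
    # fresh lists each call, so frames can be appended per sample
    return Data(prefix=[], unstable=[], target=[])

sample = create_empty_data()._asdict()  # mutable dict view of the tuple
sample['prefix'].append('frame-0')
sample = Data(**sample)                 # back to a namedtuple for the caller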
Example #4
def main():

    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    for i in range(1):
        args.id = str(i)
        tf.reset_default_graph()
        with tf.Session(config=config) as sess:
            args.result = os.path.join(args.result, args.id)
            args.log = os.path.join(args.log, args.id)
            args.model = os.path.join(args.model, args.id)
            args.tfrecords = os.path.join(args.tfrecords, args.id)
            if not os.path.exists(args.model):
                os.mkdir(args.model)
            if not os.path.exists(args.log):
                os.mkdir(args.log)
            if not os.path.exists(args.result):
                os.mkdir(args.result)
            if not os.path.exists(args.tfrecords):
                os.mkdir(args.tfrecords)

            dataset = Data(args)
            dataset.read_data()
            train_dataset = dataset.data_parse(os.path.join(
                args.tfrecords, 'train_data.tfrecords'),
                                               type='train')
            # test_dataset = dataset.data_parse(os.path.join(args.tfrecords, 'test_data.tfrecords'), type='test')
            # map_dataset = dataset.data_parse(os.path.join(args.tfrecords, 'map_data.tfrecords'), type='map')

            model = Model(args, sess)
            if not args.load_model:
                model.train(train_dataset, dataset)
            else:
                model.load(args.model)
            model.test(dataset)
            if args.save_decode_map:
                model.save_decode_map(dataset)
            args.result = 'result'
            args.log = 'log'
            args.tfrecords = 'tfrecords'
            args.model = 'model'
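`tf.reset_default_graph()` plus a fresh `tf.Session` per run keeps variables from one run out of the next; an equivalent TF1 idiom, sketched here, gives each run its own explicit graph object instead:

import tensorflow as tf

config = tf.ConfigProto()
config.gpu_options.allow_growth = True

for i in range(2):  # one isolated graph per run
    graph = tf.Graph()
    with graph.as_default(), tf.Session(graph=graph, config=config) as sess:
        pass  # build Data/Model here; nothing leaks across runs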
Example #5
def validate(epoch, isEval=True):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    dict_losses = DictAverageMeter()
    # switch to eval mode when requested
    evalStr = 'NoEval'
    if isEval:
        model.eval()
        evalStr = ''

    end = time.time()
    for i, data_raw in enumerate(val_dataloader):
        if i == opt.val_iters: break
        data = data_raw
        if opt.gpu_ids:
            data = map_data(lambda x: Variable(x.cuda(), volatile=True), data)
        else:
            data = map_data(lambda x: Variable(x, volatile=True), data)
        data = Data(*data)
        data_time.update(time.time() - end)
        output = model.forward(data)
        warpped = output.warpped
        loss = criterion(output, data)

        # measure accuracy and record loss
        losses.update(loss.data[0], opt.batch_size)
        dict_losses.update(criterion.summary(), opt.batch_size)

    all_loss = dict_losses.avg
    print('{evalStr}Validation: Epoch: [{0}]\t'
          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
          'Total Time {1:.3f}\n\t'
          'All Loss {all_loss}'.format(epoch,
                                       time.time() - end,
                                       loss=losses,
                                       all_loss=all_loss,
                                       evalStr=evalStr))
    for sid in range(data.fm[0].shape[0]):
        visualize(data,
                  warpped,
                  global_step,
                  sid,
                  opt,
                  mode='both',
                  name='{}val'.format(evalStr))
    tl.log_value('{}val/Loss'.format(evalStr), losses.val, global_step)
    tl.log_value('{}val/Learning Rate'.format(evalStr),
                 scheduler.get_lr()[0], global_step)
    # tl.log_value('val/Batch Time', batch_time.val, global_step)
    tl.log_value('{}val/Data Time'.format(evalStr), data_time.val, global_step)
    for k, v in all_loss.items():
        tl.log_value('{}val/loss/'.format(evalStr) + k, v, global_step)
    model.train()
    return losses.val
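`AverageMeter` is external to this example; a minimal implementation consistent with the `.update(value, n)`, `.val`, and `.avg` usage above (an assumption about this codebase, not its actual definition):

class AverageMeter:
    """Tracks the latest value and the running average."""
    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count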
Example #6
 def set_datas(self):
     data = Data()
     global_vars = Global(data)
     terrain_container_test = Terrain_Container(data.terrain_map,
                                                global_vars.terrainBank)
     person_container_test = Person_Container(data.map_armylist,
                                              global_vars.personBank)
     self.map = map_controller.Main(terrain_container_test,
                                    person_container_test, global_vars)
     self.w = terrain_container_test.M
     self.h = terrain_container_test.N
Example #7
 def train(self):
     # d = Data()
     fast_text_embed = FastText(Data().get_tokens(),
                                size=self.embed_size,
                                window=self.window_size,
                                min_count=self.min_freq,
                                sample=self.down_sampling,
                                workers=4,
                                sg=self.sg,
                                iter=100)
     return fast_text_embed
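The keyword names used here (`size`, `iter`) belong to gensim 3.x; under gensim 4.0+ the same call would look roughly like this (the placeholder variables stand in for the attributes used above):

from gensim.models import FastText

tokens = [['hello', 'world'], ['hello', 'there']]  # stands in for Data().get_tokens()
embed_size, window_size, min_freq, down_sampling, sg = 100, 5, 1, 1e-3, 1

fast_text_embed = FastText(
    sentences=tokens,
    vector_size=embed_size,  # renamed from `size` in gensim 4.0
    window=window_size,
    min_count=min_freq,
    sample=down_sampling,
    workers=4,
    sg=sg,
    epochs=100,              # renamed from `iter` in gensim 4.0
)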
Example #8
def main():
    data = Data()
    gmh = GoogleMapsHandler()
    import random
    ids = random.sample(list(data._id2shop.keys()), 10)  # sample() needs a sequence on Python 3.11+
    print(ids)
    shops = [data[identifier] for identifier in ids]
    init = time()
    for shop in shops:
        gmh.get_shop_info(shop)
        gmh.get_popular_times(shop)
    print(f'All data found in {time() - init:.2f}s')
Example #9
    def __init__(self):
        pyglet.resource.path = ['../img']
        pyglet.resource.reindex()
        self.size = 80
        self.select = (0, 0)
        self.origin_color = WHITE
        data = Data()
        global_vars = Global(data)
        terrain_container_test = Terrain_Container(data.terrain_map,
                                                   global_vars.terrainBank)
        person_container_test = Person_Container(data.map_armylist,
                                                 global_vars.personBank)
        map1 = map_controller.Main(terrain_container_test,
                                   person_container_test, global_vars)
        self.w = terrain_container_test.M
        self.h = terrain_container_test.N

        super(Arena, self).__init__(r=0,
                                    g=0,
                                    b=0,
                                    a=255,
                                    width=self.w * self.size,
                                    height=self.h * self.size)
        self.tiles = []
        for x in range(self.w):
            tl_x = []
            for y in range(self.h):
                tile = Tile(pos=coordinate(x, y, self.size), size=self.size)
                self.add(tile)
                tl_x.append(tile)
            self.tiles.append(tl_x)

        self.repaint(map1)
        self.map = map1
        self.text = cocos.text.RichLabel('ROUND 1',
                                         font_name='times new roman',
                                         font_size=36,
                                         position=(0, 420),
                                         color=(127, 255, 170, 255))
        self.add(self.text)
        self.end_turn = Sprite(image='ring.png',
                               position=(560, 200),
                               color=MAROON,
                               scale=0.8)

        self.add(self.end_turn)
        self.add(
            cocos.text.RichLabel(text='END', position=(520, 190),
                                 font_size=30))

        self.next_round()
Example #10
def main(args):
    run_name = f'batch{args.batch_size}-epochs{args.epochs}-lstm{args.layers}x{args.width}-lookback{args.lookback}.{args.name}'

    data = Data(lookback=args.lookback, batch_size=args.batch_size)
    assert not np.isnan(data.features).any()

    n_features = data.train.features.shape[2]

    if args.load_model:
        model = keras.models.load_model(args.model_path)
    else:
        model = build_model(args.width,
                            args.batch_size,
                            args.lookback,
                            n_features,
                            args.layers,
                            output_threshold=vasopressin_threshold)

    checkpoint = keras.callbacks.ModelCheckpoint('models/model.hdf5',
                                                 monitor='val_loss',
                                                 verbose=0,
                                                 save_best_only=True,
                                                 save_weights_only=False,
                                                 mode='auto')
    history = []
    for i in range(args.epochs):
        history.append(
            model.fit(
                x=data.train.features,
                y=data.train.vasopressin,
                batch_size=args.batch_size,
                epochs=1,
                validation_data=(data.validate.features,
                                 data.validate.vasopressin),
                verbose=2,
                shuffle=False,
                callbacks=[
                    checkpoint,
                ],
            ))
        print(f"Epoch {i}/{args.epochs}")
        model.reset_states()

    model.save(f'models/{run_name}.hdf5')
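The one-epoch-at-a-time loop with `shuffle=False` followed by `model.reset_states()` is the standard pattern for stateful LSTMs, where hidden state carries across batches within an epoch; a sketch of what `build_model` plausibly sets up (the `stateful=True` layout is an assumption, and `output_threshold` is omitted):

from tensorflow import keras

def build_model_sketch(width, batch_size, lookback, n_features, layers):
    model = keras.Sequential()
    for i in range(layers):
        # only the first layer needs the fixed batch shape that stateful mode requires
        kwargs = {'batch_input_shape': (batch_size, lookback, n_features)} if i == 0 else {}
        model.add(keras.layers.LSTM(width,
                                    stateful=True,                      # keep state across fit() calls
                                    return_sequences=(i < layers - 1),  # feed sequences to stacked layers
                                    **kwargs))
    model.add(keras.layers.Dense(1))
    model.compile(optimizer='adam', loss='mse')
    return model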
Example #11
 def initUI(self):
     self.palette = QPalette()
     self.palette.setColor(self.backgroundRole(), BLACK)  # set the background color
     # palette1.setBrush(self.backgroundRole(), QtGui.QBrush(QtGui.QPixmap('../../../Document/images/17_big.jpg')))   # set a background image
     self.setPalette(self.palette)
     self.setAutoFillBackground(True)
     self.setGeometry(300, 300, 800, 600)
     self.setWindowTitle('3X_Qt')
     data = Data()
     global_vars = Global(data)
     terrain_container_test = Terrain_Container(data.terrain_map,
                                                global_vars.terrainBank)
     person_container_test = Person_Container(data.map_armylist,
                                              global_vars.personBank)
     self.map = map_controller.Main(terrain_container_test,
                                    person_container_test, global_vars)
     self.w = terrain_container_test.M
     self.h = terrain_container_test.N
     self.show()
     self.select = (-1, -1)
     self.on_mouse = (-1, -1)
Example #12
def train_model(model,
                sess,
                learning_rate=LEARNING_RATE,
                batch_size=BATCH_SIZE,
                num_epochs=NUM_EPOCHS,
                num_steps=NUM_STEPS,
                patience=PATIENCE,
                log_dir=LOG_DIR,
                log_freq=LOG_FREQ):
    data_obj = Data()
    X_val, y_val = sess.run([data_obj.X_val, data_obj.y_val])
    logger = Logger(log_dir=log_dir,
                    num_epochs=num_epochs,
                    num_steps=num_steps)

    for epoch in range(num_epochs):
        # initialize per-epoch variables
        print("\nBeginning epoch %d" % (epoch + 1))
        logger.start_time = time.time()
        for batch in range(num_steps):
            X_batch, y_batch = sess.run([data_obj.X_batch, data_obj.y_batch])
            # Train on single batch
            metrics = model.train_on_batch(X_batch, y_batch)
            logger.train_metrics['loss'][batch] = metrics[0]
            logger.train_metrics['acc'][batch] = metrics[1]

            if batch % log_freq == 0 and batch != 0:  # Log training metrics every 'log_freq' batches
                logger.log_batch_metrics(epoch, batch)

        val_metrics = model.evaluate(X_val, y_val)
        logger.log_epoch_metrics(epoch, val_metrics)
        logger.save_model(epoch)
        if logger.early_stopping(epoch, patience):
            break  # stop training if loss is not decreasing

    test_metrics = test_model(
        data_obj=data_obj, model=model,
        sess=sess)  # Evaluate performance on external test set
    logger.log_test_metrics(test_metrics)
    sess.close()
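`logger.early_stopping(epoch, patience)` is defined elsewhere; a minimal patience rule consistent with the comment above ("stop training if loss is not decreasing") might look like this sketch:

def early_stopping(val_losses, patience):
    """True once the last `patience` epochs produced no new best loss."""
    if len(val_losses) <= patience:
        return False
    best_before = min(val_losses[:-patience])
    return min(val_losses[-patience:]) >= best_before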
Example #13
def main(config):
    # For fast training
    cudnn.benchmark = True
    
    print('number class:', config.n_class)

    # Data loader 
    data_loader = Data(config) # Data_Loader(config.train, config.dataset, config.image_path, config.imsize, config.batch_size, shuf=config.train)

    # Create directories if not exist
    make_folder(config.model_save_path, config.version)
    make_folder(config.sample_path, config.version)
    make_folder(config.log_path, config.version)
    make_folder(config.attn_path, config.version)

    print('config data_loader and build logs folder')

    if config.train:
        trainer = Trainer(data_loader.train_loader, config)
        trainer.train()
    else:
        tester = Tester(data_loader.test_loader, config)
        tester.test()
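Here `Data(config)` is expected to expose `train_loader` and `test_loader`; a skeletal wrapper with that shape, assembled from the fields named in the commented-out `Data_Loader` call (a sketch, not the project's actual class):

import torch
from torchvision import datasets, transforms as T

class Data:
    def __init__(self, config):
        transform = T.Compose([T.Resize(config.imsize),
                               T.CenterCrop(config.imsize),
                               T.ToTensor()])
        dataset = datasets.ImageFolder(config.image_path, transform=transform)
        self.train_loader = torch.utils.data.DataLoader(
            dataset, batch_size=config.batch_size, shuffle=True)
        # a real implementation would build test_loader from a held-out split
        self.test_loader = torch.utils.data.DataLoader(
            dataset, batch_size=config.batch_size, shuffle=False)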
Example #14
File: main.py Project: zy47/UKL
def main():

    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # set your GPU ID
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = False

    for i in range(1):
        args.id = str(i)
        tf.reset_default_graph()
        with tf.Session(config=config) as sess:
            args.result = os.path.join(args.result, args.id)
            args.log = os.path.join(args.log, args.id)
            args.model = os.path.join(args.model, args.id)
            args.tfrecords = os.path.join(args.tfrecords, args.id)
            if not os.path.exists(args.model):
                os.mkdir(args.model)
            if not os.path.exists(args.log):
                os.mkdir(args.log)
            if not os.path.exists(args.result):
                os.mkdir(args.result)
            if not os.path.exists(args.tfrecords):
                os.mkdir(args.tfrecords)

            dataset = Data(args)
            dataset.read_data_cluster()
            dataset.read_data()
            model = Model(args, sess)
            if not args.load_model:
                model.train(dataset)
            else:
                model.load(args.model)
                model.test2(dataset)

            if args.save_decode_map:
                model.save_decode_map(dataset)
            if args.save_decode_segmap:
                model.save_decode_seg_map(dataset)
            if args.del_tfrecords:
                shutil.rmtree(args.tfrecords)
            args.result = 'result'
            args.log = 'log'
            args.tfrecords = 'tfrecords'
            args.model = 'model'
Example #15
    def __init__(self):

        pyglet.resource.path = ['../img']
        pyglet.resource.reindex()
        self.size = 80
        self.select = None  # the currently selected character
        self.state = 'none'
        data = Data()
        global_vars = Global(data)
        terrain_container_test = Terrain_Container(data.terrain_map, global_vars.terrainBank)
        person_container_test = Person_Container(data.map_armylist, global_vars.personBank)
        map1 = map_controller.Main(terrain_container_test, person_container_test, global_vars)
        self.w = terrain_container_test.M
        self.h = terrain_container_test.N

        super(Arena, self).__init__(r=0, g=0, b=0, a=255,
                                    width=self.w * self.size,
                                    height=self.h * self.size)

        self.tiles = []
        for x in range(self.w):
            tl_x = []
            for y in range(self.h):
                tile = MapCell(pos=coordinate(x, y, self.size), size=self.size)
                self.add(tile)
                tl_x.append(tile)
            self.tiles.append(tl_x)

        self.map = map1
        position = map1.person_container.position
        controller = map1.person_container.controller
        people = map1.person_container.people
        self.person = {}
        self.map2per = {}
        for p in people:
            id = p.pid
            (x, y) = position[id]
            if controller[id] == 1:
                state = 'enemy'
            else:
                state = 'self'
            self.map2per[(x, y)] = p
            self.person[id] = MapPer(person=p, pos=coordinate(x, y, self.size), size=self.size, state=state)
            self.add(self.person[id])

        self.text = cocos.text.RichLabel('ROUND 1',
                                         font_name='times new roman',
                                         font_size=36,
                                         position=(0, 420),
                                         color=(127, 255, 170, 255))
        self.add(self.text)
        self.end_turn = Sprite(image='ring.png', position=(560, 200), color=MAROON, scale=0.8)

        self.add(self.end_turn)
        self.add(cocos.text.RichLabel(text='END', position=(520, 190), font_size=30))
        self.mapstate = self.map.send_mapstate()

        self.highlight = set()
        self.mouse_select = None
        self.target = None
        self.item = None
        self.origin_color = None
        self.mark = set()
        self.add(Audiolayer())

        self.next_round()
Example #16
def main():
    msg = """
            Usage:
            Training: 
                python generate.py --mode train --clas novel
            Sampling:
                python generate.py --mode sample --clas poetry --start 两个黄鹂鸣翠柳
                python generate.py --mode sample --clas novel --start 两个黄鹂鸣翠柳 --num 200
            """

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--mode',
        type=str,
        default='sample',
        help='usage: train, con-train or sample, sample is default')

    parser.add_argument('--clas',
                        type=str,
                        default='poetry',
                        help='novel or poetry, poetry is default')

    parser.add_argument('--start', type=str, default='', help='')

    parser.add_argument('--head', type=str, default='', help='')

    parser.add_argument('--num', type=int, help='number of words to generate')

    args = parser.parse_args()

    if args.mode == 'sample':
        infer = True
        if args.clas == 'novel':
            data = Data(novel_data_dir,
                        novel_input_file,
                        novel_vocab_file,
                        novel_tensor_file,
                        batch_size=batch_size)
            model = Model(data=data,
                          infer=infer,
                          num_layers=num_layers,
                          layers_size=layers_size)
        else:
            data = Data(data_dir,
                        input_file,
                        vocab_file,
                        tensor_file,
                        batch_size=batch_size)
            model = Model(data=data,
                          infer=infer,
                          num_layers=num_layers,
                          layers_size=layers_size)

        print(
            sample(data,
                   model,
                   head=args.head,
                   cals=args.clas,
                   start=args.start,
                   num=args.num))

    elif args.mode == 'train' or args.mode == 'con-train':
        global is_continue_train
        if args.mode == 'con-train':
            is_continue_train = True
        infer = False
        global clas
        clas = args.clas

        if args.clas == 'novel':
            data = Data(novel_data_dir,
                        novel_input_file,
                        novel_vocab_file,
                        novel_tensor_file,
                        seq_len=seq_len,
                        batch_size=batch_size)
            model = Model(data=data,
                          infer=infer,
                          num_layers=num_layers,
                          layers_size=layers_size)
            print(train(data, model))
        elif args.clas == 'poetry':
            data = Data(data_dir,
                        input_file,
                        vocab_file,
                        tensor_file,
                        clas='poetry',
                        batch_size=batch_size)
            model = Model(data=data,
                          infer=infer,
                          num_layers=num_layers,
                          layers_size=layers_size)
            print(train(data, model))
        else:
            print(msg)
    else:
        print(msg)
Example #17
    def train(self):
        x = tf.placeholder(
            tf.float32,
            [self.args.BATCH_SIZE, self.args.IMG_H, self.args.IMG_W, 3],
            name='x-input')
        sp = tf.placeholder(
            tf.float32,
            [self.args.BATCH_SIZE, self.args.IMG_H, self.args.IMG_W, 1],
            name='sp-input')
        #y_ = tf.placeholder(tf.float32,[self.args.BATCH_SIZE, 415, 1279, 1],name='y-input')
        y_ = tf.placeholder(
            tf.float32,
            [self.args.BATCH_SIZE, self.args.IMG_H, self.args.IMG_W, 1],
            name='y-input')
        dataset = Data(self.args)

        #x=dataset.data_argument(x)#data argument
        #x = tf.image.per_image_standardization(x)
        #x = tf.image.per_image_standardization(x)
        #sp = tf.image.per_image_standardization(sp)
        #y_ = tf.image.per_image_standardization(y_)

        #x=dataset.data_argument(x)

        #sp = tf.image.per_image_standardization(sp)
        #y_== tf.image.per_image_standardization(y_)
        mask = self.get_mask(y_)
        mask_num = tf.reduce_sum(mask)

        print("mask_sum")
        print(mask_num)

        mask_add = tf.multiply(mask, -1.0)
        mask_add = tf.add(mask_add, 1.0)
        #mask_add=tf.multiply(mask_add,0.1)

        #y_2=tf.multiply(y_,mask)
        #sp2=tf.multiply(sp,mask)
        #x2=tf.multiply(x,mask)
        x2 = x
        sp2 = sp
        y_2 = y_

        #sp2=tf.add(sp2,mask_add)
        #y_2=tf.add(y_2,mask_add)
        #x2=tf.add(x2,mask_add)

        net = Model(self.args)
        #pre_depth,rgb_depth,sp_depth,igf=net.network3(x2,sp2,net.trainingmode)
        pre_depth, rgb_depth, sp_depth, igs, my_see = net.network(
            x2, sp2, net.trainingmode)

        pre_depth2 = tf.multiply(pre_depth, mask)
        rgb_depth2 = tf.multiply(rgb_depth, mask)
        sp_depth2 = tf.multiply(sp_depth, mask)

        #pre_depth2=pre_depth
        #rgb_depth2=rgb_depth
        #sp_depth2=sp_depth

        #pre_depth2=pre_depth#tf.add(pre_depth2,mask_add)#############
        #rgb_depth2=rgb_depth#tf.add(rgb_depth2,mask_add)###########
        #sp_depth2=sp_depth#tf.add(sp_depth2,mask_add)###############

        train_loss = net.loss(pre_depth2, rgb_depth2, sp_depth2,
                              y_2)  #/mask_num*self.args.IMG_H*self.args.IMG_W

        MAE = net.MAE_loss(pre_depth2,
                           y_2)  #/mask_num*self.args.IMG_H*self.args.IMG_W
        #MAE=tf.divide(MAE,mask_num)
        #MAE=(MAE)/self.args.BATCH_SIZE/self.args.IMG_H/self.args.IMG_W

        iMAE = net.iMAE_loss(pre_depth2,
                             y_2)  #/mask_num*self.args.IMG_H*self.args.IMG_W
        #iMAE=tf.divide(iMAE,mask_num)
        #iMAE=(iMAE)/self.args.BATCH_SIZE/self.args.IMG_H/self.args.IMG_W

        iRMSE = net.iRMSE_loss(pre_depth2,
                               y_2)  #/mask_num*self.args.IMG_H*self.args.IMG_W
        #iRMSEs=tf.divide(iRMSEs,mask_num)
        #iRMSEs=(iRMSEs)/self.args.BATCH_SIZE/self.args.IMG_H/self.args.IMG_W

        RMSE = net.RMSE_loss(pre_depth2,
                             y_2)  #/mask_num*self.args.IMG_H*self.args.IMG_W
        #RMSE=tf.divide(RMSE,mask_num)
        #RMSE=(RMSE)/self.args.BATCH_SIZE/self.args.IMG_H/self.args.IMG_W

        iRMSE_rgbb = net.iRMSE_loss(
            rgb_depth2, y_2)  #/mask_num*self.args.IMG_H*self.args.IMG_W

        erro_map = tf.abs(tf.subtract(pre_depth2, y_2))
        erro_rgb = tf.abs(tf.subtract(rgb_depth2, y_2))
        erro_sp = tf.abs(tf.subtract(sp_depth2, y_2))

        learning_rate = tf.train.exponential_decay(self.learning_rate,
                                                   global_step=net.global_step,
                                                   decay_steps=1000,
                                                   decay_rate=0.2)
        train_op, regularization_loss = net.optimize(learning_rate)

        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 0.9
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            tf.global_variables_initializer().run()
            for i in range(self.args.epoch):
                imgrgb, imgrsd, imgdd = dataset.get_data()
                imgdd = imgdd + 1

                #step,youtput,loss_value,op,lr,mask_,erro_map_,rl,rgbp,spp,iMAE_,MAE_,iRMSEs_,RMSE_= sess.run([net.global_step,pre_depth2,train_loss,train_op,learning_rate,mask,erro_map,regularization_loss,rgb_depth2,sp_depth2,iMAE,MAE,iRMSEs,RMSE],feed_dict={x: imgrgb, sp:imgrsd, y_: imgdd})
                #step,youtput,loss_value,op,lr,erro_map_,rl,rgbp,spp,iMAE_,MAE_,iRMSEs_,RMSE_,sp_depth2_,mask_,y_2_,sp2_,mask_add_,mask_num_,igf4= sess.run([net.global_step,pre_depth2,train_loss,train_op,learning_rate,erro_map,regularization_loss,rgb_depth2,sp_depth2,iMAE,MAE,iRMSEs,RMSE,sp_depth2,mask,y_2,sp2,mask_add,mask_num,igf[4]],feed_dict={x: imgrgb, sp:imgrsd, y_: imgdd})

                step, loss_value, train_op_, lr, rl, mask_num_ = sess.run(
                    [
                        net.global_step, train_loss, train_op, learning_rate,
                        regularization_loss, mask_num
                    ],
                    feed_dict={
                        x: imgrgb,
                        sp: imgrsd,
                        y_: imgdd
                    })
                youtput, sp2_, y_2_ = sess.run([sp_depth2, sp2, y_2],
                                               feed_dict={
                                                   x: imgrgb,
                                                   sp: imgrsd,
                                                   y_: imgdd
                                               })
                rgb_d2, sp_d2 = sess.run([rgb_depth2, sp_depth2],
                                         feed_dict={
                                             x: imgrgb,
                                             sp: imgrsd,
                                             y_: imgdd
                                         })
                #igf4,igf3,igf2=sess.run([igf[4],igf[3],igf[2]],feed_dict={x: imgrgb, sp:imgrsd, y_: imgdd})
                erro_map_, erro_rgb_, erro_sp_, mask_ = sess.run(
                    [erro_map, erro_rgb, erro_sp, mask],
                    feed_dict={
                        x: imgrgb,
                        sp: imgrsd,
                        y_: imgdd
                    })
                MAE_, iMAE_, RMSE_, iRMSE_ = sess.run([MAE, iMAE, RMSE, iRMSE],
                                                      feed_dict={
                                                          x: imgrgb,
                                                          sp: imgrsd,
                                                          y_: imgdd
                                                      })

                iRMSE_rgbb_ = sess.run([iRMSE_rgbb],
                                       feed_dict={
                                           x: imgrgb,
                                           sp: imgrsd,
                                           y_: imgdd
                                       })

                igs_ = sess.run(igs,
                                feed_dict={
                                    x: imgrgb,
                                    sp: imgrsd,
                                    y_: imgdd
                                })

                my_see_ = sess.run(my_see,
                                   feed_dict={
                                       x: imgrgb,
                                       sp: imgrsd,
                                       y_: imgdd
                                   })

                pre_depth2_ = sess.run(pre_depth2,
                                       feed_dict={
                                           x: imgrgb,
                                           sp: imgrsd,
                                           y_: imgdd
                                       })

                sh_my_see_ = np.array(my_see_)

                sh_my_see_ = sh_my_see_.reshape((40, 64))
                sh_my_see_ = sh_my_see_ * 1000
                #print(sh_my_see_)

                #imae=0
                #for i in range(pre_depth2_.shape[1]):
                #    for j in range(pre_depth2_.shape[2]):
                #        if pre_depth2_[0,i,j,0]!=0 and y_2_[0,i,j,0]!=0:
                #            a=pre_depth2_[0,i,j,0]
                #            b=y_2_[0,i,j,0]
                #            a=a/1000000.0
                #            b=b/1000000.0
                #            c=a-b
                ##            d=a*b
                #            imae=imae+abs(c/d)
                ##imae=imae/self.args.IMG_H/self.args.IMG_W
                #print("my test nnnnnnnnnnnnnnnnnnnnnn imae",imae)

                # #rgb_SE_=sess.run(rgb_SE,feed_dict={x: imgrgb, sp:imgrsd, y_: imgdd})

                #print(rgb_SE_[0])
                print(
                    "Training step: %d, loss: %g, iMAE: %g, MAE: %g, iRMSE: %g, RMSE: %g, regularization_loss: %g, learning_rate: %.8f"
                    % (step, loss_value, iMAE_, MAE_, iRMSE_, RMSE_, rl, lr))
                #print(pre_depth2_)
                if i % 50 == 0:
                    self.train_result.update({i: MAE_})
                    #print(mask_)
                if i % 600 == 0 and i != 0:
                    self.test(sess)
                    l_test_rloss = list(self.test_result.values())
                    average_a = np.mean(l_test_rloss)
                    self.test_average_loss.update(
                        {i / self.test_record_step: average_a})
                '''
                if i%self.train_record_step==0 and i!=0:
                    plt.figure()
                    
                    plt.subplot(431)
                    plt.axis('off') 
                    plt.title('rgb',fontsize='medium',fontweight='bold')
                    plt.imshow(imgrgb[0,:,:,:])
                    
                    
                    plt.subplot(432) 
                    plt.axis('off') 
                    plt.title('spare depth map',fontsize='medium',fontweight='bold')
                    plt.imshow(sp2_[0,:,:,0])
                    
                    plt.subplot(433)
                    plt.axis('off') 
                    plt.title('dense depth map',fontsize='medium',fontweight='bold')
                    plt.imshow(y_2_[0,:,:,0])
                    
                    plt.subplot(434)
                    plt.axis('off') 
                    plt.title('predicted depth map',fontsize='medium',fontweight='bold')
                    plt.imshow(youtput[0,:,:,0])
                    
                    plt.subplot(435)
                    plt.axis('off') 
                    plt.title('rgb depth',fontsize='medium',fontweight='bold')
                    plt.imshow(rgb_d2[0,:,:,0])
                    
                    plt.subplot(436)
                    plt.axis('off') 
                    plt.title('sp map',fontsize='medium',fontweight='bold')
                    plt.imshow(sp_d2[0,:,:,0])
                    
                    plt.subplot(437)
                    plt.axis('off') 
                    plt.title('erro predict',fontsize='medium',fontweight='bold')
                    plt.imshow(erro_map_[0,:,:,0],cmap='hot')
                    
                    plt.subplot(438)
                    plt.axis('off') 
                    plt.title('erro rgb',fontsize='medium',fontweight='bold')
                    plt.imshow(erro_rgb_[0,:,:,0],cmap='hot')
                    
                    plt.subplot(439)
                    plt.axis('off') 
                    plt.title('erro sp',fontsize='medium',fontweight='bold')
                    plt.imshow(erro_sp_[0,:,:,0],cmap='hot')
                    
                    
                    
                    
                    plt.savefig("./train_output/"+"step"+str(i)+"loss"+str(loss_value)+".png") 
                    plt.close() 
                    
                    #sio.savemat("./train_output/imgdd"+str(i)+".mat", {'imgdd':y_2_[0,:,:,0]})
                    #sio.savemat("./train_output/predict"+str(i)+".mat", {'predict':youtput[0,:,:,0]})
                    #sio.savemat("./train_output/mask.mat", {'mask':mask_[0,:,:]})
                    plt.figure()
                    plt.subplot(2,3,1)
                    plt.axis('off') 
                    plt.title('guided image filter1',fontsize='medium',fontweight='bold')
                    plt.imshow(igs_[0][0,:,:,0],cmap="hsv")
                    
                    
                    plt.subplot(2,3,2)
                    plt.axis('off') 
                    plt.title('guided image filter2',fontsize='medium',fontweight='bold')
                    plt.imshow(igs_[1][0,:,:,0],cmap="hsv")
                    
                    plt.subplot(2,3,3)
                    plt.axis('off') 
                    plt.title('guided image filter3',fontsize='medium',fontweight='bold')
                    plt.imshow(igs_[2][0,:,:,0],cmap="hsv")
                    
                    plt.subplot(2,3,4)
                    plt.axis('off') 
                    plt.title('guided image filter4',fontsize='medium',fontweight='bold')
                    plt.imshow(igs_[3][0,:,:,0],cmap="hsv")
                    
                    plt.subplot(2,3,5)
                    plt.axis('off') 
                    plt.title('guided image filter5',fontsize='medium',fontweight='bold')
                    plt.imshow(igs_[4][0,:,:,0],cmap="hsv")
                    
                    plt.savefig("./train_output/"+"step"+str(i)+"igf.png") 
                    plt.close() 
                    
                    
                    #plt.figure()
                    #plt.axis('off') 
                    #plt.title('Squeeze and Excitation',fontsize='medium',fontweight='bold')
                    #plt.imshow(sh_my_see_,cmap="tab20c")
                    #plt.colorbar()
                    #plt.savefig("./train_output/"+"step"+str(i)+"seshow.png") 
                    #plt.close()
                '''

                #if i%999==0 and i!=0:
                #    self.record()

            index = list(self.train_result.keys())
            value = list(self.train_result.values())
            plt.figure(3)
            #plt.axis('off')
            plt.title('train loss', fontsize='medium', fontweight='bold')
            plt.plot(index, value)
            plt.savefig("./train_output/train_loss.png")
            plt.close()
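Each step of the loop above issues around ten separate `sess.run` calls with the same `feed_dict`, and every call re-executes the forward pass; the fetches can be grouped into a single run, as in this sketch reusing the tensors defined in the example:

fetches = {
    'step': net.global_step, 'loss': train_loss, 'train_op': train_op,
    'lr': learning_rate, 'reg': regularization_loss, 'mask_num': mask_num,
    'MAE': MAE, 'iMAE': iMAE, 'RMSE': RMSE, 'iRMSE': iRMSE,
    'pred': pre_depth2, 'igs': igs, 'my_see': my_see,
}
out = sess.run(fetches, feed_dict={x: imgrgb, sp: imgrsd, y_: imgdd})
# out['loss'], out['MAE'], ... now come from one consistent forward/backward pass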
Example #18
    def test(self, sess):
        x = tf.placeholder(tf.float32,
                           [1, self.args.IMG_H, self.args.IMG_W, 3],
                           name='x-input')
        sp = tf.placeholder(tf.float32,
                            [1, self.args.IMG_H, self.args.IMG_W, 1],
                            name='sp-input')
        #y_ = tf.placeholder(tf.float32,[1, 415, 1279, 1],name='y-input')
        y_ = tf.placeholder(tf.float32,
                            [1, self.args.IMG_H, self.args.IMG_W, 1],
                            name='y-input')
        dataset = Data(self.args)

        mask = self.get_mask(y_)
        mask_num = tf.reduce_sum(mask)

        mask_add = tf.multiply(mask, -1.0)
        mask_add = tf.add(mask_add, 1.0)
        #mask_add=tf.multiply(mask_add,0.1)
        #mask_add=tf.multiply(mask_add,0.1)

        y_2 = y_  #tf.multiply(y_,mask)
        sp2 = sp  #tf.multiply(sp,mask)
        x2 = x
        #x2=tf.multiply(x,mask)

        #sp2=tf.add(sp2,mask_add)
        #y_2=tf.add(y_2,mask_add)
        #x2=tf.add(x2,mask_add)

        net = Model(self.args)
        pre_depth, rgb_depth, sp_depth, igs, SE = net.network(
            x2, sp2, net.testingmode)

        pre_depth2 = tf.multiply(pre_depth, mask)

        rgb_depth2 = tf.multiply(rgb_depth, mask)
        sp_depth2 = tf.multiply(sp_depth, mask)

        #pre_depth2=tf.add(pre_depth2,mask_add)
        #rgb_depth2=tf.add(rgb_depth2,mask_add)
        #sp_depth2=tf.add(sp_depth2,mask_add)

        test_loss = net.test_loss(
            pre_depth2, y_2)  #/mask_num*self.args.IMG_H*self.args.IMG_W

        erro_map = tf.abs(tf.subtract(pre_depth2, y_2))
        erro_map_rgb = tf.abs(tf.subtract(rgb_depth2, y_2))
        erro_map_sp = tf.abs(tf.subtract(sp_depth2, y_2))
        #relative_erro_map=tf.div(erro_map,y_)
        #relative_erro_map=tf.reduce_mean(relative_erro_map)

        MAE = net.MAE_loss(pre_depth2,
                           y_2)  #/mask_num*self.args.IMG_H*self.args.IMG_W
        rgbMAE = net.MAE_loss(rgb_depth2,
                              y_2)  #/mask_num*self.args.IMG_H*self.args.IMG_W
        spMAE = net.MAE_loss(sp_depth2,
                             y_2)  #/mask_num*self.args.IMG_H*self.args.IMG_W
        #MAE=(MAE)/self.args.BATCH_SIZE/self.args.IMG_H/self.args.IMG_W

        iMAE = net.iMAE_loss(pre_depth2,
                             y_2)  #/mask_num*self.args.IMG_H*self.args.IMG_W
        rgbiMAE = net.iMAE_loss(
            rgb_depth2, y_2)  #/mask_num*self.args.IMG_H*self.args.IMG_W
        spiMAE = net.iMAE_loss(sp_depth2,
                               y_2)  #/mask_num*self.args.IMG_H*self.args.IMG_W
        #iMAE=(iMAE)/self.args.BATCH_SIZE/self.args.IMG_H/self.args.IMG_W

        iRMSEs = net.iRMSE_loss(
            pre_depth2, y_2)  #/mask_num*self.args.IMG_H*self.args.IMG_W
        rgbiRMSEs = net.iRMSE_loss(
            rgb_depth2, y_2)  #/mask_num*self.args.IMG_H*self.args.IMG_W
        spiRMSEs = net.iRMSE_loss(
            sp_depth2, y_2)  #/mask_num*self.args.IMG_H*self.args.IMG_W
        #iRMSEs=(iRMSEs)/self.args.BATCH_SIZE/self.args.IMG_H/self.args.IMG_W

        RMSE = net.RMSE_loss(pre_depth2,
                             y_2)  #/mask_num*self.args.IMG_H*self.args.IMG_W
        rgbRMSE = net.RMSE_loss(
            rgb_depth2, y_2)  #/mask_num*self.args.IMG_H*self.args.IMG_W
        spRMSE = net.RMSE_loss(sp_depth2,
                               y_2)  #/mask_num*self.args.IMG_H*self.args.IMG_W
        #RMSE=(RMSE)/self.args.BATCH_SIZE/self.args.IMG_H/self.args.IMG_W
        MAE_list = []
        iMAE_list = []
        RMSE_list = []
        iRMSEs_list = []

        rgbMAE_list = []
        rgbiMAE_list = []
        rgbRMSE_list = []
        rgbiRMSEs_list = []

        spMAE_list = []
        spiMAE_list = []
        spRMSE_list = []
        spiRMSEs_list = []
        imgrgb, imgrsd, imgdd = dataset.read_test_image()
        for i in range(len(dataset.test_rgb_list)):
            loss_value, youtput, erro_map_, mask_ = sess.run(
                [test_loss, pre_depth2, erro_map, mask],
                feed_dict={
                    x: np.expand_dims(imgrgb[i], 0),
                    sp: np.expand_dims(imgrsd[i], 0),
                    y_: np.expand_dims(imgdd[i], 0)
                })
            MAE_, iMAE_, RMSE_, iRMSE_ = sess.run(
                [MAE, iMAE, RMSE, iRMSEs],
                feed_dict={
                    x: np.expand_dims(imgrgb[i], 0),
                    sp: np.expand_dims(imgrsd[i], 0),
                    y_: np.expand_dims(imgdd[i], 0)
                })

            rgbMAE_, rgbiMAE_, rgbRMSE_, rgbiRMSEs_ = sess.run(
                [rgbMAE, rgbiMAE, rgbRMSE, rgbiRMSEs],
                feed_dict={
                    x: np.expand_dims(imgrgb[i], 0),
                    sp: np.expand_dims(imgrsd[i], 0),
                    y_: np.expand_dims(imgdd[i], 0)
                })

            spMAE_, spiMAE_, spRMSE_, spiRMSEs_ = sess.run(
                [spMAE, spiMAE, spRMSE, spiRMSEs],
                feed_dict={
                    x: np.expand_dims(imgrgb[i], 0),
                    sp: np.expand_dims(imgrsd[i], 0),
                    y_: np.expand_dims(imgdd[i], 0)
                })

            rgb_depth2_, sp_depth2_ = sess.run(
                [rgb_depth2, sp_depth2],
                feed_dict={
                    x: np.expand_dims(imgrgb[i], 0),
                    sp: np.expand_dims(imgrsd[i], 0),
                    y_: np.expand_dims(imgdd[i], 0)
                })

            erro_map_rgb_, erro_map_sp_, sp2_, y_2_ = sess.run(
                [erro_map_rgb, erro_map_sp, sp2, y_2],
                feed_dict={
                    x: np.expand_dims(imgrgb[i], 0),
                    sp: np.expand_dims(imgrsd[i], 0),
                    y_: np.expand_dims(imgdd[i], 0)
                })

            self.test_result.update({i: loss_value})
            self.test_relative.update({i: loss_value})

            imae = 0
            imargbe = 0
            imaesp = 0
            iRMSE = 0
            iRMSErgb = 0
            iRMSEsp = 0
            # Use r/c for the pixel loops so the outer image index i is not clobbered
            # (the original reused i here, which corrupted the later print and mkdir).
            for r in range(youtput.shape[1]):
                for c in range(youtput.shape[2]):
                    if youtput[0, r, c, 0] != 0 and y_2_[0, r, c, 0] != 0:
                        a = youtput[0, r, c, 0] / 1000000.0
                        b = y_2_[0, r, c, 0] / 1000000.0
                        imae = imae + abs((a - b) / (a * b))
                        iRMSE = iRMSE + ((a - b) / (a * b)) ** 2
                    if rgb_depth2_[0, r, c, 0] != 0 and y_2_[0, r, c, 0] != 0:
                        a1 = rgb_depth2_[0, r, c, 0] / 1000000.0
                        b1 = y_2_[0, r, c, 0] / 1000000.0
                        imargbe = imargbe + abs((a1 - b1) / (a1 * b1))
                        # the original accumulated youtput values here, which looks
                        # like a copy-paste slip; use the rgb branch prediction
                        iRMSErgb = iRMSErgb + ((a1 - b1) / (a1 * b1)) ** 2
                    if sp_depth2_[0, r, c, 0] != 0 and y_2_[0, r, c, 0] != 0:
                        a2 = sp_depth2_[0, r, c, 0] / 1000000.0
                        b2 = y_2_[0, r, c, 0] / 1000000.0
                        imaesp = imaesp + abs((a2 - b2) / (a2 * b2))
                        iRMSEsp = iRMSEsp + ((a2 - b2) / (a2 * b2)) ** 2

            imae = imae / self.args.IMG_H / self.args.IMG_W
            imargbe = imargbe / self.args.IMG_H / self.args.IMG_W
            imaesp = imaesp / self.args.IMG_H / self.args.IMG_W
            iRMSE = iRMSE / self.args.IMG_H / self.args.IMG_W
            iRMSE = np.sqrt(iRMSE)
            iRMSErgb = iRMSErgb / self.args.IMG_H / self.args.IMG_W
            iRMSErgb = np.sqrt(iRMSErgb)
            iRMSEsp = iRMSEsp / self.args.IMG_H / self.args.IMG_W
            iRMSEsp = np.sqrt(iRMSEsp)

            print('i:%g,MAE_: %g,RMSE_: %g,iMAE_: %g,iRMSE_: %g' %
                  (i, MAE_, RMSE_, imae, iRMSE))

            if not math.isinf(iMAE_) and not math.isinf(
                    iRMSE_) and not math.isinf(rgbiMAE_) and not math.isinf(
                        rgbiRMSEs_) and not math.isinf(
                            spiMAE_) and not math.isinf(spiRMSEs_):
                MAE_list.append(MAE_)
                iMAE_list.append(imae)
                RMSE_list.append(RMSE_)
                iRMSEs_list.append(iRMSE)

                rgbMAE_list.append(rgbMAE_)
                rgbiMAE_list.append(imargbe)
                rgbRMSE_list.append(rgbRMSE_)
                rgbiRMSEs_list.append(iRMSErgb)

                spMAE_list.append(spMAE_)
                spiMAE_list.append(imaesp)
                spRMSE_list.append(spRMSE_)
                spiRMSEs_list.append(iRMSEsp)
            if not os.path.exists('./test_output/test_result' + str(i)):
                os.mkdir('./test_output/test_result' + str(i))
            '''    
            plt.figure(0)
            
            #plt.subplot(321)
            plt.axis('off') 
            #plt.title('rgb',fontsize='medium',fontweight='bold')
            plt.imshow(imgrgb[i][:,:,:])
            plt.savefig("./test_output/test_result"+str(i)+"/rgb.png") 
            plt.close()
            
            plt.figure(1)
            #plt.subplot(322)
            plt.axis('off') 
            #plt.title('spare depth map',fontsize='medium',fontweight='bold')
            plt.imshow(sp2_[0,:,:,0])
            plt.savefig("./test_output/test_result"+str(i)+"/spare.png") 
            plt.close()
            
            plt.figure(2)
            #plt.subplot(323)
            plt.axis('off') 
            #plt.title('dense depth map',fontsize='medium',fontweight='bold')
            plt.imshow(y_2_[0,:,:,0])
            plt.savefig("./test_output/test_result"+str(i)+"/dense.png")
            plt.close()
            
            
            plt.figure(3)
            plt.axis('off') 
            #plt.title('predicted depth map',fontsize='medium',fontweight='bold')
            plt.imshow(youtput[0,:,:,0])
            plt.savefig("./test_output/test_result"+str(i)+"/predict.png")
            plt.close()
            
            plt.figure(4)
            #plt.subplot(325)
            plt.axis('off') 
            #plt.title('erro map',fontsize='medium',fontweight='bold')
            plt.imshow(erro_map_[0,:,:,0],cmap="hot")
            plt.savefig("./test_output/test_result"+str(i)+"/erro_map.png")
            plt.close()
            
            plt.figure(41)
            #plt.subplot(325)
            plt.axis('off') 
            #plt.title('erro map',fontsize='medium',fontweight='bold')
            plt.imshow(erro_map_rgb_[0,:,:,0],cmap="hot")
            plt.savefig("./test_output/test_result"+str(i)+"/erro_map_rgb_.png")
            plt.close()
            
            plt.figure(42)
            #plt.subplot(325)
            plt.axis('off') 
            #plt.title('erro map',fontsize='medium',fontweight='bold')
            plt.imshow(erro_map_sp_[0,:,:,0],cmap="hot")
            plt.savefig("./test_output/test_result"+str(i)+"/erro_map_sp_.png")
            plt.close()
            
            
            plt.figure(5)
            #plt.subplot(326)
            plt.axis('off') 
            #plt.title('mask map',fontsize='medium',fontweight='bold')
            plt.imshow(mask_[0,:,:,0],cmap="hot")
            plt.savefig("./test_output/test_result"+str(i)+"/mask.png")
            plt.close()
            
            plt.figure(6)
            #plt.subplot(326)
            plt.axis('off') 
            #plt.title('mask map',fontsize='medium',fontweight='bold')
            plt.imshow(rgb_depth2_[0,:,:,0])
            plt.savefig("./test_output/test_result"+str(i)+"/rgb_predict.png")
            plt.close()
            
            plt.figure(7)
            #plt.subplot(326)
            plt.axis('off') 
            #plt.title('mask map',fontsize='medium',fontweight='bold')
            plt.imshow(sp_depth2_[0,:,:,0])
            plt.savefig("./test_output/test_result"+str(i)+"/sp_predict.png")
            plt.close()
            
        
            #sio.savemat("./test_output/mat/"+str(i)+'pridict.mat', {'pridict':youtput[0,:,:,0]})
            
            #sio.savemat("./test_output/mat/"+str(i)+'gt.mat', {'gt':imgdd[i][:,:,0]})
            '''

        sio.savemat("./test_output/MAS.mat", {'MAS': MAE_list})
        sio.savemat("./test_output/iMAS.mat", {'iMAS': iMAE_list})
        sio.savemat("./test_output/RMSE.mat", {'RMSE': RMSE_list})
        sio.savemat("./test_output/iRMSE.mat", {'iRMSE': iRMSEs_list})

        sio.savemat("./test_output/rgbMAS.mat", {'rgbMAS': rgbMAE_list})
        sio.savemat("./test_output/rgbiMAS.mat", {'rgbiMAS': rgbiMAE_list})
        sio.savemat("./test_output/rgbRMSE.mat", {'rgbRMSE': rgbRMSE_list})
        sio.savemat("./test_output/rgbiRMSE.mat", {'rgbiRMSE': rgbiRMSEs_list})

        sio.savemat("./test_output/spMAS.mat", {'spMAS': spMAE_list})
        sio.savemat("./test_output/spiMAS.mat", {'spiMAS': spiMAE_list})
        sio.savemat("./test_output/spRMSE.mat", {'spRMSE': spRMSE_list})
        sio.savemat("./test_output/spiRMSE.mat", {'spiRMSE': spiRMSEs_list})

        x = len(MAE_list)
        plt.figure(8)
        xx = list(range(1, x + 1, 1))
        #plt.subplot(326)
        #plt.axis('off')
        #plt.title('mask map',fontsize='medium',fontweight='bold')

        plt.scatter(xx, rgbMAE_list, s=3, c='r')
        plt.scatter(xx, spMAE_list, s=3, c='g')
        plt.scatter(xx, MAE_list, s=3, c='b')
        plt.legend(["rgb", "sp", "rgb+sp"])
        plt.title("MAE")
        plt.xlabel('index')
        plt.ylabel('mm')
        plt.savefig("./test_output/MAE.png")
        plt.close()

        plt.figure(9)
        xx = list(range(1, x + 1, 1))
        #plt.subplot(326)
        #plt.axis('off')
        #plt.title('mask map',fontsize='medium',fontweight='bold')

        plt.scatter(xx, rgbiMAE_list, s=3, c='r')
        plt.scatter(xx, spiMAE_list, s=3, c='g')
        plt.scatter(xx, iMAE_list, s=3, c='b')
        plt.legend(["rgb", "sp", "rgb+sp"])
        #plt.legend(["without SE"])
        plt.title("iMAE")
        mins = min([min(spiMAE_list), min(rgbiMAE_list), min(iMAE_list)])
        maxs = max([max(spiMAE_list), max(rgbiMAE_list), max(iMAE_list)])
        plt.ylim(mins - 100, maxs + 100)

        plt.xlabel('index')
        plt.ylabel('1/km')
        plt.savefig("./test_output/iMAE_list.png")
        plt.close()

        plt.figure(10)
        xx = list(range(1, x + 1, 1))
        #plt.subplot(326)
        #plt.axis('off')
        #plt.title('mask map',fontsize='medium',fontweight='bold')

        plt.scatter(xx, rgbRMSE_list, s=3, c='r')
        plt.scatter(xx, spRMSE_list, s=3, c='g')
        plt.scatter(xx, RMSE_list, s=3, c='b')
        plt.legend(["rgb", "sp", "rgb+sp"])
        plt.title("RMSE")
        plt.xlabel('index')
        plt.ylabel('mm')
        plt.savefig("./test_output/RMSE.png")
        plt.close()

        plt.figure(11)
        xx = list(range(1, x + 1, 1))

        plt.scatter(xx, rgbiRMSEs_list, s=3, c='r')
        plt.scatter(xx, spiRMSEs_list, s=3, c='g')
        plt.scatter(xx, iRMSEs_list, s=3, c='b')
        plt.title("iRMSE")
        maxs = max([max(iRMSEs_list), max(rgbiRMSEs_list), max(spiRMSEs_list)])
        plt.ylim(0, maxs + 100)
        plt.legend(["rgb", "sp", "rgb+sp"])
        plt.xlabel('index')
        plt.ylabel('1/km')
        plt.savefig("./test_output/iRMSEs.png")
        plt.close()

        ave_MAE = np.mean(MAE_list)
        var_MAE = np.var(MAE_list)

        ave_iMAE = np.mean(iMAE_list)
        var_iMAE = np.var(iMAE_list)

        ave_RMSE = np.mean(RMSE_list)
        var_RMSE = np.var(RMSE_list)

        ave_iRMSE = np.mean(iRMSEs_list)
        var_iRMSE = np.var(iRMSEs_list)
        ######################################################
        ave_rgbMAE = np.mean(rgbMAE_list)
        var_rgbMAE = np.var(rgbMAE_list)

        ave_rgbiMAE = np.mean(rgbiMAE_list)
        var_rgbiMAE = np.var(rgbiMAE_list)

        ave_rgbRMSE = np.mean(rgbRMSE_list)
        var_rgbRMSE = np.var(rgbRMSE_list)

        ave_rgbiRMSE = np.mean(rgbiRMSEs_list)
        var_rgbiRMSE = np.var(rgbiRMSEs_list)
        ######################################################
        ave_spMAE = np.mean(spMAE_list)
        var_spMAE = np.var(spMAE_list)

        ave_spiMAE = np.mean(spiMAE_list)
        var_spiMAE = np.var(spiMAE_list)

        ave_spRMSE = np.mean(spRMSE_list)
        var_spRMSE = np.var(spRMSE_list)

        ave_spiRMSE = np.mean(spiRMSEs_list)
        var_spiRMSE = np.var(spiRMSEs_list)

        print("^^^^^^^^^^^^^^^^")
        print("average MAE:", ave_MAE)
        plt.figure(12)

        plt.bar([1], [ave_rgbMAE], width=0.2, color=['r'])
        plt.bar([2], [ave_spMAE], width=0.2, color=['g'])
        plt.bar([3], [ave_MAE], width=0.2, color=['b'])
        plt.xlabel('index')
        plt.ylabel('mm')
        plt.legend(["rgb", "sp", "rgb+sp"])
        plt.title("MAE")
        plt.savefig("./test_output/MAE_BAR.png")
        plt.close()

        plt.bar([1], [ave_rgbiMAE], width=0.2, color=['r'])
        plt.bar([2], [ave_spiMAE], width=0.2, color=['g'])
        plt.bar([3], [ave_iMAE], width=0.2, color=['b'])
        plt.xlabel('index')
        plt.ylabel('1/km')
        plt.legend(["rgb", "sp", "rgb+sp"])
        plt.title("iMAE")
        maxs = max([ave_iMAE, ave_rgbiMAE, ave_spiMAE])
        plt.ylim(0, maxs + 30)
        plt.savefig("./test_output/iMAE_BAR.png")
        plt.close()

        plt.bar([1], [ave_rgbRMSE], width=0.2, color=['r'])
        plt.bar([2], [ave_spRMSE], width=0.2, color=['g'])
        plt.bar([3], [ave_RMSE], width=0.2, color=['b'])
        plt.legend(["rgb", "sp", "rgb+sp"])
        plt.xlabel('index')
        plt.ylabel('mm')
        plt.title("RMSE")
        plt.savefig("./test_output/RMSE_BAR.png")
        plt.close()

        plt.bar([1], [ave_rgbiRMSE], width=0.2, color=['r'])
        plt.bar([2], [ave_spiRMSE], width=0.2, color=['g'])
        plt.bar([3], [ave_iRMSE], width=0.2, color=['b'])
        maxs = max([ave_iRMSE, ave_rgbiRMSE, ave_spiRMSE])
        plt.ylim(0, maxs + 30)
        plt.legend(["rgb", "sp", "rgb+sp"])
        plt.xlabel('index')
        plt.ylabel('1/km')
        plt.title("iRMSE")
        plt.savefig("./test_output/iRMSE_BAR.png")
        plt.close()
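
# Sketch (not from the original code): the per-image metric lists saved above
# are assumed to be filled earlier in the loop. For reference, depth-completion
# metrics of this kind are conventionally computed as below; the function name,
# units (depth in mm, inverse metrics in 1/km) and the zero-means-invalid mask
# are assumptions.
import numpy as np

def depth_metrics(pred_mm, gt_mm):
    mask = (gt_mm > 0) & (pred_mm > 0)        # evaluate on valid pixels only
    diff = pred_mm[mask] - gt_mm[mask]
    mae = np.mean(np.abs(diff))               # mm
    rmse = np.sqrt(np.mean(diff ** 2))        # mm
    idiff = 1e6 * (1.0 / pred_mm[mask] - 1.0 / gt_mm[mask])  # 1/mm -> 1/km
    imae = np.mean(np.abs(idiff))             # 1/km
    irmse = np.sqrt(np.mean(idiff ** 2))      # 1/km
    return mae, rmse, imae, irmse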
Exemplo n.º 19
0
def train(epoch):
    global global_step, criterion
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, data in enumerate(train_dataloader):
        # measure data loading time
        if opt.gpu_ids:
            data = map_data(lambda x: Variable(x.cuda()), data)
        else:
            data = map_data(lambda x: Variable(x), data)

        data_time.update(time.time() - end)
        data = Data(*data)
        output = model.forward(data)
        loss = criterion(output, data)
        # measure accuracy and record loss
        losses.update(loss.data[0], opt.batch_size)

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)

        if (global_step + 1) % opt.print_freq == 0:
            all_loss = criterion.summary()
            util.diagnose_network(model.cnn)
            util.diagnose_network(model.fc_loc)
            visualize(data,
                      output.warpped,
                      global_step,
                      0,
                      opt,
                      mode='save',
                      name='train')

            print('Epoch: [{0}][{1}/{2}]\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Learning Rate {learning_rate}\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\n\t'
                  'All Loss {all_loss}'.format(
                      epoch,
                      i,
                      len(train_dataloader),
                      batch_time=batch_time,
                      learning_rate=scheduler.get_lr(),
                      data_time=data_time,
                      loss=losses,
                      all_loss=all_loss))

        if (global_step + 1) % opt.log_freq == 0:
            all_loss = criterion.summary()
            tl.log_value('train/Loss', losses.val, global_step)
            tl.log_value('train/Learning Rate',
                         scheduler.get_lr()[0], global_step)
            # tl.log_value('train/Batch Time', batch_time.val, global_step)
            tl.log_value('train/Data Time', data_time.val, global_step)
            for k, v in all_loss.items():
                tl.log_value('train/loss/' + k, v, global_step)
            for sid in range(data.fm[0].shape[0]):
                visualize(data,
                          output.warpped,
                          global_step,
                          sid,
                          opt,
                          mode='log',
                          name='train')

        if (global_step + 1) % opt.val_freq == 0:
            validate(epoch)
            validate(epoch, False)

        # if global_step == 500:
        #     opt.id_loss_weight = 0
        #     criterion = sys.modules['loss'].Loss(opt)

        global_step += 1
        end = time.time()
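
# Sketch: AverageMeter is used above but not defined in this excerpt; a minimal
# version consistent with the calls .update(val, n), .val and .avg (an
# assumption, not the original implementation):
class AverageMeter:
    def __init__(self):
        self.val = 0.0    # most recent value
        self.sum = 0.0    # weighted sum of values
        self.count = 0    # total weight
        self.avg = 0.0    # running average

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count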
Exemplo n.º 20
0
def main(num, run_num):

    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2'
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    overall_oa = []  # used to store OA
    overall_aa = []  # used to store AA
    overall_kappa = []  # used to store Kappa
    overall_matrix = []  # used to store the resulting confusion matrices
    for j in range(num):  # In this experiment, num is 20
        print('Run ' + str(j + 1) + ':')
        for i in range(1):
            args.id = str(i)
            tf.reset_default_graph()
            with tf.Session(config=config) as sess:
                args.result = os.path.join(args.result, args.id)
                args.log = os.path.join(args.log, args.id)
                args.model = os.path.join(args.model, args.id)
                args.tfrecords = os.path.join(args.tfrecords, args.id)
                if not os.path.exists(args.model):
                    os.mkdir(args.model)
                if not os.path.exists(args.log):
                    os.mkdir(args.log)
                if not os.path.exists(args.result):
                    os.mkdir(args.result)
                if not os.path.exists(args.tfrecords):
                    os.mkdir(args.tfrecords)

                dataset = Data(args)
                #test_pos_all is used to draw a diagram with no background
                test_pos_all = dataset.read_data()
                # producing training dataset
                train_dataset = dataset.data_parse(os.path.join(
                    args.tfrecords, 'train_data.tfrecords'),
                                                   type='train')
                # producing the testing dataset (alternatively, produce it on the fly during use)
                test_dataset = dataset.data_parse(os.path.join(
                    args.tfrecords, 'test_data.tfrecords'),
                                                  type='test')
                #all_dataset = dataset.data_parse(os.path.join(args.tfrecords, 'map_data.tfrecords'), type='map')

                model = Model(args, sess)
                # training the model
                if not args.load_model:
                    model.train(train_dataset, dataset)
                else:
                    model.load(args.model)
                # the model returns the results of a single experiment run
                oa, aa, kappa, matrix, result_label = model.test(dataset)
                # Calculated running time
                over = time.time()
                print("平均运行时间:  " + str((over - start) / 60.0) + " min")
                #create_All_label(model,args.data_name,dataset,j) # draw picture containing background
                #decode_map(test_pos_all, result_label,args.data_name) # draw picture not containing background
                overall_oa.append(oa)
                overall_aa.append(aa)
                overall_kappa.append(kappa)
                overall_matrix.append(matrix)
                args.result = 'result'
                args.log = 'log'
                args.tfrecords = 'tfrecords'
                args.model = 'model'
    # Store the results of the experiment locally
    if not os.path.exists(str(pathlib.Path(args.data_name))):
        os.mkdir(str(pathlib.Path(args.data_name)))
    sio.savemat(
        os.path.join(str(pathlib.Path(args.data_name)),
                     'result' + str(run_num) + '.mat'), {
                         'oa': overall_oa,
                         'aa': overall_aa,
                         'kappa': overall_kappa
                     })
    sio.savemat(
        os.path.join(str(pathlib.Path(args.data_name)),
                     'matrix' + str(run_num) + '.mat'),
        {'matrix': overall_matrix})
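
# Sketch: model.test() returns oa, aa, kappa and a confusion matrix. The three
# summaries are conventionally derived from the matrix as below (rows = true
# classes, columns = predictions is an assumption, not taken from the source):
import numpy as np

def summarize(matrix):
    m = np.asarray(matrix, dtype=np.float64)
    total = m.sum()
    oa = np.trace(m) / total                        # overall accuracy
    aa = np.mean(np.diag(m) / m.sum(axis=1))        # average per-class accuracy
    pe = np.sum(m.sum(axis=0) * m.sum(axis=1)) / total ** 2
    kappa = (oa - pe) / (1.0 - pe)                  # chance-corrected agreement
    return oa, aa, kappa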
Exemplo n.º 21
0
def main():
    msg = """
            Usage:
            Training: 
                python generate.py --mode train --clas novel
            Sampling:
                python generate.py --mode sample --head 明月别枝惊鹊
            """
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode',
                        type=str,
                        default='',
                        help=u'usage: train or sample, sample is default')
    parser.add_argument('--head', type=str, default='', help='generate an acrostic poem from this head')

    parser.add_argument('--clas', type=str, default='', help='novel or poetry')

    args = parser.parse_args()

    if args.mode == 'sample':
        infer = True
        data = Data(data_dir,
                    input_file,
                    vocab_file,
                    tensor_file,
                    batch_size=batch_size)
        model = Model(data=data,
                      infer=infer,
                      num_layers=num_layers,
                      layers_size=layers_size)
        print(sample(data, model, head=args.head))
    elif args.mode == 'train':
        infer = False
        clas = args.clas
        if args.clas == 'novel':
            data = Data(novel_data_dir,
                        novel_input_file,
                        novel_vocab_file,
                        novel_tensor_file,
                        seq_len=seq_len,
                        batch_size=batch_size)
            model = Model(data=data,
                          infer=infer,
                          num_layers=num_layers,
                          layers_size=layers_size)
            print(train(data, model))
        elif args.clas == 'poetry':
            data = Data(data_dir,
                        input_file,
                        vocab_file,
                        tensor_file,
                        clas='poetry',
                        batch_size=batch_size)
            model = Model(data=data,
                          infer=infer,
                          num_layers=num_layers,
                          layers_size=layers_size)
            print(train(data, model))
        else:
            print(msg)
    else:
        print(msg)
Exemplo n.º 22
0
config = Config()
conv_layers = config.model.conv_layers
fully_layers = config.model.fully_connected_layers
l0 = config.l0
alphabet_size = config.alphabet_size
embedding_size = config.model.embedding_size
num_of_classes = config.num_of_classes
th = config.model.th
p = config.dropout_p
print("Loaded")

from data_loader import Data

all_data = Data(data_source = config.train_data_source, 
                     alphabet = config.alphabet,
                     l0 = config.l0,
                     batch_size = 0,
                     no_of_classes = config.num_of_classes)

all_data.loadData()
seed = 7
X, Y = all_data.getAllData()
kfold = StratifiedKFold(n_splits=config.kfolds, shuffle=True, random_state=seed)

for train, test in kfold.split(X, Y.reshape(Y.shape[0],)):
    print("Building the model...")
    # building the model

    # Input layer
    inputs = Input(shape=(l0,), name='sent_input', dtype='int64')
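    # Sketch of how such a fold loop typically continues (an assumption; the
    # original listing is truncated here): slice this fold's data with the
    # index arrays returned by StratifiedKFold.
    x_train, y_train = X[train], Y[train]
    x_test, y_test = X[test], Y[test]
    # ...build the remaining layers on top of `inputs`, compile, then fit on
    # (x_train, y_train) and validate on (x_test, y_test).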
Exemplo n.º 23
0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Import Dataset
from data_loader import Data
data = Data(
    percent_load=1.0,
    filepath='../mri-data/Cardic_Undersampled_for_CS/training_data_small.h5')

# Import Models
from model import unet

# Training Parameters
learning_rate = 0.0001
num_steps = 100
batch_size = 32
display_step = 10

# Network Parameters
WIDTH = 256
HEIGHT = 256
CHANNELS = 2
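
# Sketch (assumed, not in the source): wiring the network parameters above into
# a TF1-style input placeholder that the imported unet model could consume.
X = tf.placeholder(tf.float32, shape=[None, HEIGHT, WIDTH, CHANNELS], name='x_in')
# prediction = unet(X)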
Exemplo n.º 24
0
class PathFinder:
    def __init__(self):
        self._costs = defaultdict()
        self._data = Data()
        self._gmaps_handler = GoogleMapsHandler()

    def compute_distance(self, shop1: Shop, shop2: Shop):
        if (shop1.identifier, shop2.identifier) not in self._costs:
            self._costs[(shop1.identifier, shop2.identifier)] = compute_distance(shop1.coords, shop2.coords)
            self._costs[(shop2.identifier, shop1.identifier)] = self._costs[(shop1.identifier, shop2.identifier)]
        return self._costs[(shop1.identifier, shop2.identifier)]

    def compute_edges(self, shop_lists: List[Shop]):
        edges = list()
        for shop_list1, shop_list2 in pairwise(shop_lists):
            for shop1, shop2 in product(shop_list1, shop_list2):
                edges.append(Edge(shop1.identifier, shop2.identifier, self.compute_distance(shop1, shop2)))
        return edges

    def compute_graphs(self, data, my_coords: Coords):
        graphs = list()
        # Create every possible visiting order of the shop categories
        used_orders = set()
        for i, key_order in enumerate(permutations(data.keys(), len(data)), start=1):
            if key_order in used_orders:
                continue
            used_orders.add(key_order)
            used_orders.add(key_order[::-1])  # Only one direction is enough
            init = time()
            ordered_shops = list()
            ordered_shops.append([Shop(identifier='A', coords=my_coords)])
            ordered_shops.extend([data[key] for key in key_order])
            ordered_shops.append([Shop(identifier='B', coords=my_coords)])
            graph = Graph(self.compute_edges(ordered_shops))
            graphs.append(graph)
            print(f'Computed subgraph {i} in {time() - init:.2f}s')
        return graphs

    def _find_best_with_graphs(self, data, my_coords: Coords):
        """
        Donat el diccionari de comerços dins de l'area i la teva localització et retorna els subgraphs
        :param data:
        :param my_coords:
        :return:
        """
        graphs = self.compute_graphs(data, my_coords)
        abs_best_route = None
        abs_best_dist = float('inf')
        for i, graph in enumerate(graphs, start=1):
            init = time()
            best_route, dist = graph.dijkstra('A', 'B')
            best_route = best_route[1:-1]  # Remove A and B
            if dist < abs_best_dist:
                abs_best_route = best_route
                abs_best_dist = dist
            print(f'Computed best path for subgraph {i} in {time() - init:.2f}s:')
            print('\t' + '-->'.join(['Home'] + [str(identifier) for identifier in best_route] + ['Home']))
            print(f'\tDistance: {dist:.2f}m')
        return abs_best_route, abs_best_dist

    def find_best_path(self, desired_activities, my_coords, max_radius=1000, max_shops=100):
        """

        :param desired_activities:
        :param my_coords: coordinates of my house
        :param max_radius: in meters, max distance between starting point and a shop
        :param max_shops: max shops to consider of each kind, ordered by distance (increasing)
        :return:
        """
        init = time()
        filtered_data = self._data.get_filtered_shops(my_coords=my_coords,
                                                      desired_activities=desired_activities,
                                                      max_radius=max_radius,
                                                      max_shops=max_shops)

        print(f'Found locals in a radius of {max_radius}m:')
        for k, v in filtered_data.items():
            print(f"\t{k + ':':30} {len(v)}")

        best_route, best_dist = self._find_best_with_graphs(filtered_data, my_coords)
        print('The best route is:')
        print('-->'.join(['Home'] + [str(self._data[identifier]) for identifier in best_route] + ['Home']))
        print(f'The route covers a distance of {best_dist:.2f}m')
        output = list()
        for identifier in best_route:
            shop = self._data[identifier]
            self._gmaps_handler.get_shop_info(shop)
            output.append(shop.to_dict())
        print(f'Total runtime: {time() - init:.2f}s')
        return output

    def find_best_supermarket(self, my_coords, max_radius=1000, max_shops=20):
        init = time()
        supermarkets = self._data.get_filtered_shops(my_coords=my_coords,
                                                     desired_activities={'Supermercats'},
                                                     max_radius=max_radius,
                                                     max_shops=max_shops)['Supermercats']

        if len(supermarkets) >= max_shops:
            print(f'Found more than {max_shops} supermarkets in a radius of {max_radius}m')
        else:
            print(f'Found a total of {len(supermarkets)} supermarkets in a radius of {max_radius}m')

        good_shops = []
        for shop in supermarkets:
            self._gmaps_handler.get_shop_info(shop)
            if shop.rating >= 2.5:
                good_shops.append(shop)
                if len(good_shops) >= 5:
                    break

        for shop in good_shops:
            self._gmaps_handler.get_popular_times(shop)

        best_shop = min(good_shops, key=lambda x: x.pct_people)
        print(f'The best supermarket is: {best_shop}, '
              f'at a distance of {self.compute_distance(Shop(identifier="A", coords=my_coords), best_shop):.2f}m')
        print(f'Found best supermarket in {time() - init:.2f}s')
        return json.dumps(best_shop.to_dict()).encode('utf8')

    def run(self, my_coords, activities):
        if len(activities) <= 3:
            result = self.find_best_path(desired_activities=activities, my_coords=my_coords)
        else:
            result = self.find_best_supermarket(my_coords=my_coords)
        return result
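
# Sketch: `pairwise` and `compute_distance` are used above but imported from
# elsewhere. Plausible stand-ins (assumptions, not the original helpers): the
# classic itertools pairwise recipe (absent from the stdlib before Python 3.10),
# and haversine great-circle distance in meters for (lat, lon) tuples in degrees.
from itertools import tee
from math import radians, sin, cos, asin, sqrt

def pairwise(iterable):
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)

def compute_distance(a, b):
    lat1, lon1, lat2, lon2 = map(radians, (a[0], a[1], b[0], b[1]))
    h = sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2
    return 2 * 6371000 * asin(sqrt(h))  # mean Earth radius in meters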
Exemplo n.º 25
0
def main():
    opt = TestOptions().parse()
    # preprocess data
    all_stable_frames, fps = get_images(opt.video_root + 'stable/' +
                                        str(opt.video_index) + '.avi')
    all_unstable_frames, fps = get_images(opt.video_root + 'unstable/' +
                                          str(opt.video_index) + '.avi')

    # generate data flow
    pred_frames_for_input = []
    singleVideoData = PreprocessDataSet(all_stable_frames, all_unstable_frames,
                                        pred_frames_for_input, opt)
    eval_data_loader = torch.utils.data.DataLoader(singleVideoData)
    model, criterion = create_model(opt)
    checkpoint = torch.load(opt.checkpoint_path,
                            map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint['state_dict'])
    data_time = AverageMeter()
    end = time.time()
    # go through model to get output
    idx = 0
    pred_frames = []
    if opt.instnorm:
        model.train()
    else:
        model.eval()
    if opt.fake_test:
        print("fake test")
        pred_frames = []
        for i in range(50):
            for j, data in enumerate(eval_data_loader):
                if opt.gpu_ids:
                    data = map_data(
                        lambda x: Variable(x.cuda(), volatile=True), data)
                else:
                    data = map_data(lambda x: Variable(x, volatile=True), data)
                data_time.update(time.time() - end)
                data = Data(*data)
                output = model.forward(data)
                warpped = output.warpped
                pred_frames += data.prefix
                for u, w, t in zip(data.unstable, warpped, data.target):
                    pred_frames += (u, w, torch.abs(w - t))
                # visualize(data, warpped, i, 0, opt, 'save')

        pred_frames = list(map(lambda x: tensor2im(x.data), pred_frames))

    else:
        for i in range(0, len(all_stable_frames) - 1):
            if i % 100 == 0:
                print("=====> %d/%d" % (i, len(all_stable_frames)))
            for j, data in enumerate(eval_data_loader):
                if opt.gpu_ids:
                    data = map_data(
                        lambda x: Variable(x.cuda(), volatile=True), data)
                else:
                    data = map_data(lambda x: Variable(x, volatile=True), data)
                data_time.update(time.time() - end)
                data = Data(*data)
                # print(data)
                output = model.forward(data)
                warpped = output.warpped
                # save outputs
                # if (i < opt.prefix[0]):
                #     last_frame = all_stable_frames[0]
                # else:
                #     last_frame = pred_frames_for_input[len(pred_frames_for_input) + 1 - opt.prefix[0]]
                # print(data.prefix[-1][0].data.shape)
                last_frame = output_to_input([data.prefix[-1]], opt)
                pred_frames.append(
                    draw_imgs(output_to_input(warpped,
                                              opt), all_stable_frames[i],
                              all_unstable_frames[i], last_frame))
                pred_frames_for_input.append(output_to_input(warpped, opt))
                eval_data_loader = torch.utils.data.DataLoader(
                    PreprocessDataSet(all_stable_frames, all_unstable_frames,
                                      pred_frames_for_input, opt))
                # if i < 100: visualize(data, warpped, i, 0, opt, 'save')

    # print video
    generate_video(pred_frames, fps, opt)
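
# Sketch: generate_video is not shown in this excerpt; a minimal OpenCV-based
# version (the output path, codec, and RGB frame layout are assumptions):
import cv2

def generate_video(frames, fps, opt, path='./output.avi'):
    h, w = frames[0].shape[:2]
    writer = cv2.VideoWriter(path, cv2.VideoWriter_fourcc(*'XVID'), fps, (w, h))
    for frame in frames:
        writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))  # cv2 expects BGR
    writer.release()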
Exemplo n.º 26
0
import move_range_person as mrp
import numpy
from global_vars import Main as Global
from data_loader import Main as Data
from terrain_container import Main as Terrain_Container
from person_container import Main as Person_Container
import map_controller
import battle
if __name__ == '__main__':
    mov_map = numpy.random.randint(1,5,(15, 15))
    pos = (2, 3)
    mov = 10
    unstable=set([(1,2),(5,6)])
    uncross=set([(4,5),(2,2)])
    print(mrp.calc_move(unstable,uncross,mov_map,pos,mov))
    data=Data()
    global_vars=Global(data)
    print(global_vars.terrainBank["Forest"].enhance)
    print(global_vars.clsBank["Lord"].weapon_rank)
    print(global_vars.personBank["1"].ability)
    terrain_container_test=Terrain_Container(data.terrain_map,global_vars.terrainBank)
    person_container_test=Person_Container(data.map_armylist,global_vars.personBank)
    map1=map_controller.Main(terrain_container_test,person_container_test,global_vars)
    print(global_vars.itemtypeBank["Iron Lance"].ability_bonus)
    print(global_vars.itemBank[1].itemtype.weapontype)
    print(global_vars.personBank["1"].item[1].itemtype.name)
    print(global_vars.personBank["1"].weapon_rank)
    print(battle.Battle(global_vars.personBank["1"],global_vars.personBank["2"],
                        global_vars.personBank["1"].item[0],global_vars.personBank["2"].item[0],
                        map1,(0,3)).battle())
    print(global_vars.personBank["1"].suprank)
Exemplo n.º 27
0
                      default=True)
    parser.add_option("--no_pos",
                      action="store_false",
                      dest="use_pos",
                      default=True)
    parser.add_option("--stop", type="int", dest="stop", default=50)
    parser.add_option("--dynet-mem", type="int", dest="mem", default=512)
    parser.add_option("--dynet-autobatch",
                      type="int",
                      dest="dynet-autobatch",
                      default=0)
    parser.add_option("--dynet-l2", type="float", dest="dynet-l2", default=0)

    (options, args) = parser.parse_args()
    print 'loading chars'
    data = Data(options.train_data)
    universal_tags = [
        'ADJ', 'ADP', 'ADV', 'AUX', 'CCONJ', 'DET', 'INTJ', 'NOUN', 'NUM',
        'PART', 'PRON', 'PROPN', 'PUNCT', 'SCONJ', 'SYM', 'VERB', 'X'
    ]
    network = Network(universal_tags, data.chars, options)
    print 'splitting train data'
    print 'starting epochs'
    dev_batches = data.get_dev_batches(network, data.de2dict_dev)
    dev_noise_batches = data.get_dev_batches(network, data.shuffled_dict)
    print 'loaded dev+noise batches'

    best_performance = eval(dev_batches)
    random_performance = eval(dev_noise_batches)
    print 'dev sim/random:', best_performance, random_performance
    for e in range(10):
Exemplo n.º 28
0
    # CrossEntropyLoss
    loss = loss_model(fc_output, stars)

    loss.backward()
    optimizer.step()

    return loss.data.cpu().numpy()[0]


##############################################################################################
##############################################################################################
if __name__ == "__main__":

    # Data Preparation
    data = Data(config.Mode)
    data.load()
    config.uniq_char = len(data.char_list)

    # Model Preparation
    cnn_models = models.Conv1d(config.cnn_w, config.pool_w,
                               config.cnn_output_feature,
                               data.char_dict).cuda()
    linear_model = models.FC(config.fc_input_feature, config.fc_hidden_feature,
                             config.class_n).cuda()
    loss_model = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(parameters(cnn_models, linear_model),
                                lr=learning_rate,
                                momentum=config.momentum)

    for i in range(config.num_epochs):
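
# Sketch: the `parameters(...)` helper passed to SGD above is not defined in
# this excerpt; a minimal version that chains the parameters of several
# modules (an assumption, not the original helper):
import itertools

def parameters(*models):
    return itertools.chain(*(m.parameters() for m in models))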
Exemplo n.º 29
0
def __init__(self):
    self._costs = defaultdict()
    self._data = Data()
    self._gmaps_handler = GoogleMapsHandler()
Exemplo n.º 30
0
def main(args):

    # Step 1: Prepare graph data and retrieve train/validation/test index ============================= #
    # check cuda
    if args.gpu >= 0 and th.cuda.is_available():
        device = 'cuda:{}'.format(args.gpu)
    else:
        device = 'cpu'
    
    #construct graph, split in/out edges and prepare train/validation/test data_loader
    data = Data(args.dataset, args.lbl_smooth, args.num_workers, args.batch_size)
    data_iter = data.data_iter #train/validation/test data_loader
    graph = data.g.to(device)
    num_rel = th.max(graph.edata['etype']).item() + 1

    #Compute in/out edge norms and store in edata
    graph = in_out_norm(graph)

    # Step 2: Create model =================================================================== #
    compgcn_model=CompGCN_ConvE(num_bases=args.num_bases,
                                num_rel=num_rel,
                                num_ent=graph.num_nodes(),
                                in_dim=args.init_dim,
                                layer_size=args.layer_size,
                                comp_fn=args.opn,
                                batchnorm=True,
                                dropout=args.dropout,
                                layer_dropout=args.layer_dropout,
                                num_filt=args.num_filt,
                                hid_drop=args.hid_drop,
                                feat_drop=args.feat_drop,
                                ker_sz=args.ker_sz,
                                k_w=args.k_w,
                                k_h=args.k_h
                                )
    compgcn_model = compgcn_model.to(device)

    # Step 3: Create training components ===================================================== #
    loss_fn = th.nn.BCELoss()
    optimizer = optim.Adam(compgcn_model.parameters(), lr=args.lr, weight_decay=args.l2)
    
    # Step 4: training epoches =============================================================== #
    best_mrr = 0.0
    kill_cnt = 0
    for epoch in range(args.max_epochs):
        # Training and validation using a full graph
        compgcn_model.train()
        train_loss=[]
        t0 = time()
        for step, batch in enumerate(data_iter['train']):
            triple, label = batch[0].to(device), batch[1].to(device)
            sub, rel, obj, label = triple[:, 0], triple[:, 1], triple[:, 2], label
            logits = compgcn_model(graph, sub, rel)
            # compute loss
            tr_loss = loss_fn(logits, label)
            train_loss.append(tr_loss.item())

            # backward
            optimizer.zero_grad()
            tr_loss.backward()
            optimizer.step()

        train_loss = np.sum(train_loss)

        t1 = time()  
        val_results = evaluate(compgcn_model, graph, device, data_iter, split='valid')
        t2 = time()

        #validate
        if val_results['mrr']>best_mrr:
            best_mrr = val_results['mrr']
            best_epoch = epoch
            th.save(compgcn_model.state_dict(), 'comp_link'+'_'+args.dataset)
            kill_cnt = 0
            print("saving model...")
        else:
            kill_cnt += 1
            if kill_cnt > 100:
                print('early stop.')
                break
        print("In epoch {}, Train Loss: {:.4f}, Valid MRR: {:.5}\n, Train time: {}, Valid time: {}"\
                .format(epoch, train_loss, val_results['mrr'], t1-t0, t2-t1))
    
    #test use the best model
    compgcn_model.eval()
    compgcn_model.load_state_dict(th.load('comp_link'+'_'+args.dataset))
    test_results = evaluate(compgcn_model, graph, device, data_iter, split='test')
    print("Test MRR: {:.5}\n, MR: {:.10}\n, H@10: {:.5}\n, H@3: {:.5}\n, H@1: {:.5}\n"\
            .format(test_results['mrr'], test_results['mr'], test_results['hits@10'], test_results['hits@3'], test_results['hits@1']))
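
# Sketch: evaluate(...) is not shown in this excerpt; MRR, MR and Hits@k are
# conventionally computed from the rank of each true triple (ranks assumed to
# be a 1-D array with best rank = 1):
import numpy as np

def ranking_metrics(ranks):
    ranks = np.asarray(ranks, dtype=np.float64)
    return {
        'mr': ranks.mean(),
        'mrr': (1.0 / ranks).mean(),
        'hits@10': (ranks <= 10).mean(),
        'hits@3': (ranks <= 3).mean(),
        'hits@1': (ranks <= 1).mean(),
    }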