Example #1
    def save_subset_datasets(self, class_divisions, save_dir):
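        # NOTE: self.names_to_cls, self.annotations and self.dict_label_info are
        # assumed to have been populated by the dataset class this method belongs to.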
        for num, cls_div in tqdm(enumerate(class_divisions)):
            cls_div_ids = [self.names_to_cls[cls] for cls in cls_div]

            dict_annos = defaultdict(list)
            for img_id, anno_details in self.annotations.items():
                subset_annos = []
                for anno in anno_details:
                    if anno["LabelName"] in cls_div_ids:
                        subset_annos.append(anno)
                if len(subset_annos) > 0:
                    dict_annos[img_id] = subset_annos

            dict_subset_anno = dict(dict_annos)
            dict_subset_idx_to_id = {
                i: k
                for i, k in enumerate(dict_subset_anno.keys())
            }
            clsids_to_idx = {cls: i + 1 for i, cls in enumerate(cls_div_ids)}
            clsids_to_idx["background"] = 0

            saving_subset_dir = save_dir + "/" + str(num)
            make_save_dir(saving_subset_dir)

            with open(saving_subset_dir + "/" + 'clsids_to_idx.pkl',
                      'wb') as handle:
                pickle.dump(clsids_to_idx,
                            handle,
                            protocol=pickle.HIGHEST_PROTOCOL)

            with open(saving_subset_dir + "/" + "anno.json", 'w') as fp:
                json.dump(dict_subset_anno, fp)

            with open(saving_subset_dir + "/" + 'idx_to_id.pkl',
                      'wb') as handle:
                pickle.dump(dict_subset_idx_to_id,
                            handle,
                            protocol=pickle.HIGHEST_PROTOCOL)

            dict_i_to_imglist = {}
            count = 0

            for label, info in self.dict_label_info.items():
                if label in cls_div:
                    dict_i_to_imglist[count] = info.imgs
                    count += 1

            with open(saving_subset_dir + '/dict_i_to_imglist.pkl',
                      'wb') as handle:
                pickle.dump(dict_i_to_imglist,
                            handle,
                            protocol=pickle.HIGHEST_PROTOCOL)

            with open(saving_subset_dir + "/cls_num.txt", "w") as f:
                f.write(str(len(clsids_to_idx)))
Example #2
def train(args, params):
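    # NOTE: this excerpt assumes `loadmat` (scipy.io), `zscore` (scipy.stats),
    # a `utils` module, and a compiled Keras `model` are provided elsewhere
    # in the original training script.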
    print('Load Training Set')
    train = loadmat(params['train'])
    print('Load Dev Set')
    dev = loadmat(params['dev'])
    train['x'] = zscore(train['x'], axis=1)
    dev['x'] = zscore(dev['x'], axis=1)
    print('Training Size {} elements'.format(len(train['x'])))
    print('Dev Size {} elements'.format(len(dev['x'])))

    save_dir = utils.make_save_dir(params['save_dir'], args.experiment)

    import keras

    stopping = keras.callbacks.EarlyStopping(patience=8)
    reduce_lr = keras.callbacks.ReduceLROnPlateau(
        factor=0.1, patience=2, min_lr=params['learning_rate'] * 0.01)
    checkpoint = keras.callbacks.ModelCheckpoint(
        filepath=utils.get_saved_filename(save_dir), save_best_only=True)

    history = model.fit(train['x'],
                        train['y'],
                        batch_size=params['batch_size'],
                        epochs=params['epochs'],
                        validation_data=(dev['x'], dev['y']),
                        callbacks=[checkpoint, reduce_lr, stopping])

    utils.save(history, save_dir, name='history.bin')
Example #3
    def __init__(self, args):
        self.args = args
        with open(args.config, 'r') as stream:
            config = yaml.load(stream, Loader=yaml.SafeLoader)
            self.config = config[self.args.task]
        if args.model_type == 'transformer':
            self.data_utils = DataUtils(self.config, args.train, args.task)
        elif args.model_type == 'bert':
            assert args.task == 'seq2seq'
            self.data_utils = bert_utils(self.config, args.train, args.task)
        if args.train and args.save_checkpoints:
            self.model_dir = make_save_dir(
                os.path.join(args.model_dir, args.task, args.exp_name))
        self._disable_comet = args.disable_comet
        self._model_type = args.model_type
        self._save_checkpoints = args.save_checkpoints

        ###### build model ######
        print("====================")
        print("start to build model")
        print('====================')
        vocab_size = self.data_utils.vocab_size
        print("Vocab Size: %d" % (vocab_size))
        self.model = self.make_model(src_vocab=vocab_size,
                                     tgt_vocab=vocab_size,
                                     config=self.config['model'])
Example #4
    def __init__(self, args):
        super(Solver, self).__init__()
        self.args = args
        self._print_every_step = args.print_every_step
        self._valid_every_step = args.valid_every_step
        self._save_checkpoints = args.save_checkpoints
        if args.train:
            self.model_dir = make_save_dir(
                os.path.join(args.model_path, args.exp_name))
        self.batch_size = args.batch_size
        self.num_epoch = args.num_epoch
        self._disable_comet = args.disable_comet
        self._condition_generation = args.condition_generation
        self._saved_checkpoint = args.load_model

        self.tokenizer = T5Tokenizer.from_pretrained('t5-base',
                                                     padding_side='right')

        if args.test:
            self.outfile = open(os.path.join(args.pred_dir, args.prediction),
                                'w')
            self.template_decoding = args.template_decoding
            if args.template_decoding:
                all_slots = [l.strip('\n') for l in open(args.f_all_slots)]
                all_templates = [
                    l.strip('\n') for l in open(args.f_all_templates)
                ]
                self.temp = TemplateHandler(all_slots, all_templates,
                                            self.tokenizer)

        self.prepare_model(condition_generation=args.condition_generation,
                           template_decoding=args.template_decoding)
Example #5
def save_components(model, optimizer, scheduler, save_dir):
        
    if save_dir is not None:
        print("Saving checkpoint to:", save_dir)
        # create the parent directory of the checkpoint file before saving
        make_save_dir('/'.join(save_dir.split('/')[:-1]))
        if torch.cuda.device_count() > 1:
            torch.save({
                "model": model.module.state_dict(),
                "optimizer": optimizer.state_dict(),
                "scheduler": scheduler.state_dict(),
            }, save_dir)
        else:
            torch.save({
                "model": model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "scheduler": scheduler.state_dict(),
            }, save_dir)
Example #6
    def __init__(self, args):
        self.args = args

        self.model_dir = make_save_dir(args.model_dir)
        self.model = self.make_model()
Example #7
def main(save_dir):
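    # NOTE: helpers such as count_images, draw_frames, save_images and
    # clean_save_dir, as well as the UPDATE_VERBOSE flag, are assumed to be
    # defined elsewhere in the original capture script.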
    make_save_dir(save_dir)
    zense_camera = PyZenseManager()
    pt2_camera = PyPureThermal2()
    image_width, image_height = zense_camera.image_size

    res_image_width = int(image_width * 0.75)
    res_image_height = int(image_height * 0.75)
    window_image_width = int(res_image_width * 3)
    window_image_height = int(res_image_height * 1.5)

    cvui.init("capture")
    frame = np.zeros((window_image_height, window_image_width + 540, 3),
                     np.uint8)
    captured_frame_count = count_images(save_dir)

    while True:
        key = cv2.waitKey(10)
        frame[:] = (49, 52, 49)

        status = zense_camera.update(verbose=UPDATE_VERBOSE)
        status &= pt2_camera.update()
        if status:
            # Get Images
            ir_image = zense_camera.ir_image.copy()
            depth_image = zense_camera.depth_image.copy()

            thermal_image = pt2_camera.thermal_image.copy()
            thermal_image_colorized = pt2_camera.thermal_image_colorized.copy()

            # Visualize Images
            frame = draw_frames(frame, depth_image, ir_image,
                                thermal_image_colorized, res_image_width,
                                res_image_height)
            if cvui.button(frame, 50, window_image_height - 50, 130, 50,
                           "Save Result Image") or key & 0xFF == ord("s"):
                save_images(
                    depth_image,
                    ir_image,
                    thermal_image,
                    thermal_image_colorized,
                    save_dir,
                )
                captured_frame_count += 1

            if cvui.button(frame, 200, window_image_height - 50, 130, 50,
                           "Clear"):
                clean_save_dir(save_dir)
                captured_frame_count = 0

            cvui.printf(
                frame,
                900,
                window_image_height - 30,
                0.8,
                0x00FF00,
                "Number of Captured Images : %d",
                captured_frame_count,
            )
            if key & 0xFF == ord("q"):
                break

            cvui.update()
            cvui.imshow("capture", frame)

    cv2.destroyAllWindows()
Example #8
    def test(self):
        # Prepare model
        path = self.args.load_model
        state_dict = torch.load(path)['state_dict']

        self.model.load_state_dict(state_dict)

        # file path for prediction
        pred_dir = make_save_dir(self.args.pred_dir)
        filename = self.args.filename
        outfile = open(os.path.join(pred_dir, self.args.task, filename), 'w')

        # Start decoding
        data_yielder = self.data_utils.data_yielder()
        total_loss = []
        start = time.time()

        # If beam search, create sequence generator object
        self._beam_search = self.config['eval']['beam_size'] > 1
        # self._beam_search = True
        if self._beam_search:
            seq_gen = SequenceGenerator(
                self.model,
                self.data_utils,
                beam_size=self.config['eval']['beam_size'],
                no_repeat_ngram_size=self.config['eval']['block_ngram'])

        self.model.eval()
        step = 0

        # Run one batch
        for batch in data_yielder:
            step += 1
            if step % 10 == 1:
                print('Step ', step)

            # Decoding according to scheme
            if self._beam_search:
                out = seq_gen.generate(batch,
                                       pos_masking=self.config['pos_masking'],
                                       bos_token=self.data_utils.bos)
            else:
                max_length = self.config['max_len']
                if self.config['pos_masking']:
                    out = self.model.greedy_decode(batch['src'].long(),
                                                   batch['src_mask'],
                                                   max_length,
                                                   self.data_utils.bos,
                                                   batch['posmask'])
                else:
                    if self.args.task == 'joint_gen':
                        max_length = self.config['max_decode_step']
                    out = self.model.greedy_decode(batch['src'].long(),
                                                   batch['src_mask'],
                                                   max_length,
                                                   self.data_utils.bos)

            # Write sentences to file
            for l in out:
                if self._beam_search:
                    sentence = self.data_utils.id2sent(l[0]['tokens'][:-1],
                                                       True)
                else:
                    sentence = self.data_utils.id2sent(l[1:], True)
                outfile.write(sentence)
                outfile.write("\n")

        outfile.close()
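Every example on this page calls a make_save_dir helper that is not shown here. A minimal sketch of such a helper, assuming it only needs to create the directory (including missing parents) and return the path, might look like this; the exact behaviour in each project may differ.

import os

def make_save_dir(save_dir):
    # Create the directory (and any missing parents) if it does not exist yet,
    # then return the path so callers can reuse it directly.
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    return save_dir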