Exemple #1
0
def main():
    """Parse command-line arguments and dispatch to the selected sub-command.

    Collects the optional ``dir1``/``dir2`` arguments (in that order) that the
    chosen sub-parser declared, initialises the program with the first one,
    then invokes the sub-command callback with all collected directories.
    """
    freeze_support()  # no-op except in frozen Windows executables using multiprocessing
    parser = create_main_args_parser()
    args = parser.parse_args()

    # getattr() is the idiomatic replacement for args.__getattribute__(...);
    # `name in args` relies on argparse.Namespace.__contains__.
    params = tuple(getattr(args, name) for name in ('dir1', 'dir2')
                   if name in args)

    # NOTE(review): params[0] raises IndexError when no dir argument was
    # declared — same as the original behavior; presumably every sub-command
    # declares at least dir1. TODO confirm.
    init_program(params[0])
    args.command(*params)
    def improve(self):
        """Run BatchA3C._improve in NUM_PROCESS parallel worker processes
        and block until every worker has exited."""
        freeze_support()

        # Keep each worker single-threaded and CPU-only.
        os.environ['OMP_NUM_THREADS'] = '1'
        os.environ['CUDA_VISIBLE_DEVICES'] = ""

        workers = []
        for _ in range(BatchA3C.NUM_PROCESS):
            worker = mp.Process(
                target=BatchA3C._improve,
                args=(self.gloal_value_estimator, self.global_policy,
                      self.env, self.num_episodes))
            worker.start()
            workers.append(worker)

        # Wait for all workers to finish before returning.
        for worker in workers:
            worker.join()
                              output_feat=args.output_size,
                              seq_len=args.obs_seq_len,
                              kernel_size=args.kernel_size,
                              pred_seq_len=args.pred_seq_len,
                              hot_enc_length=len(config.labels)).cuda()
        model.load_state_dict(torch.load(model_path))
        model.cuda()

        ade_ = 999999
        fde_ = 999999
        aade_ = 999999
        afde_ = 999999

        print("Testing ....")
        ad, fd, aad, afd, raw_data_dic_ = test()
        ade_ = min(ade_, ad)
        fde_ = min(fde_, fd)

        aade_ = min(aade_, aad)
        afde_ = min(afde_, afd)

        ade_ls.append(ade_)
        fde_ls.append(fde_)
        print("mADE:", ade_, " mFDE:", fde_)
        print("aADE:", aade_, "aFDE:", afde_)


if __name__ == '__main__':
    # Required for multiprocessing support when the script is frozen into a
    # Windows executable; a no-op everywhere else.
    freeze_support()
    main()
Exemple #4
0
    def get_flair_vectors(cls,
                          raw_sentences: Union[List[str], List[List[str]]],
                          flair_model_path: str,
                          flair_algorithm: str,
                          retrain_corpus_path: Union[str, None] = None,
                          epochs: int = 10):
        """Embed sentences with a flair language model and return per-token
        mean vectors converted to gensim binary form.

        Args:
            raw_sentences: Either plain sentence strings or pre-tokenized
                sentences (lists of tokens). Token lists are space-joined and
                flair's tokenizer is skipped for them.
            flair_model_path: Path to the flair model. If its parent directory
                exists, ``best-lm.pt`` inside that path is loaded instead.
            flair_algorithm: Name resolved by
                ``determine_algorithm_from_string`` to an embedding class.
            retrain_corpus_path: When given, a corpus is built there (if the
                directory is missing) and the flair model is retrained before
                embedding.
            epochs: Number of retraining epochs (only used when retraining).

        Returns:
            ``Embeddings.to_gensim_binary`` applied to a dict mapping each
            token text to the running mean of its embeddings over all
            sentences it appears in.
        """
        freeze_support()

        # Optionally (re)build a corpus and retrain the language model first.
        if retrain_corpus_path:
            if not os.path.isdir(retrain_corpus_path):
                raw_sentences = cls.build_flair_corpus(raw_sentences,
                                                       retrain_corpus_path)
            cls.retrain_flair(corpus_path=retrain_corpus_path,
                              model_path_dest=flair_model_path,
                              flair_algorithm=flair_algorithm,
                              epochs=epochs)
        # Prefer the 'best-lm.pt' checkpoint when the model directory exists.
        if os.path.exists(os.path.dirname(flair_model_path)):
            flair_model_path = os.path.join(flair_model_path, 'best-lm.pt')

        use_embedding, _ = cls.determine_algorithm_from_string(
            flair_algorithm_string=flair_algorithm)

        embedding = use_embedding(flair_model_path)

        # Pre-tokenized input: join tokens into strings and let flair skip
        # its own tokenizer; drop empty sentences along the way.
        if any(isinstance(el, list) for el in raw_sentences):
            use_tokenizer = False
            raw_sentences = [
                ' '.join(raw_sentence) for raw_sentence in raw_sentences
                if len(raw_sentence) > 0
            ]
        else:
            use_tokenizer = True

        # Wrap every non-empty sentence in a flair Sentence object.
        flair_sents = [
            Sentence(raw_sentence, use_tokenizer=use_tokenizer)
            for raw_sentence in tqdm(raw_sentences,
                                     desc="Convert to flair",
                                     total=len(raw_sentences))
            if raw_sentence != '' and len(raw_sentence) > 0
        ]

        # Drop sentences that tokenized to nothing.
        flair_sents = [
            flair_sent for flair_sent in flair_sents
            if flair_sent and len(flair_sent) > 0
        ]

        # keyed_vecs_o = defaultdict(list)
        # for flair_sentence in tqdm(flair_sents, desc='Embed sentences', total=len(flair_sents)):
        #     embedding.embed(flair_sentence)
        #     for token in flair_sentence:
        #         keyed_vecs_o[token.text].append(token.embedding.cpu())
        # keyed_vecs_o = {key: np.array(torch.mean(torch.stack(vecs), 0).cpu()) for key, vecs in keyed_vecs_o.items()}

        # Maps token text -> (running mean embedding, number of occurrences).
        keyed_vecs = {}
        for flair_sentence in tqdm(flair_sents,
                                   desc='Embed sentences',
                                   total=len(flair_sents)):
            try:
                embedding.embed(flair_sentence)
            except IndexError:
                # NOTE(review): flair apparently raises IndexError on some
                # sentences; those are silently skipped — TODO confirm cause.
                continue
            for token in flair_sentence:
                if token.text in keyed_vecs:
                    cur, inc = keyed_vecs[token.text]
                    new_token_embedding = token.embedding.cpu()
                    # print(len(np.array(new_token_embedding)))
                    # Incremental mean: new_mean = cur + (x - cur) / n.
                    # Embeddings whose size differs from the stored mean are
                    # ignored (size mismatch would break the arithmetic).
                    if new_token_embedding.size() == cur.size():
                        keyed_vecs[token.text] = (cur +
                                                  (new_token_embedding - cur) /
                                                  (inc + 1), inc + 1)
                else:
                    keyed_vecs[token.text] = (token.embedding.cpu(), 1)
            # Release the embeddings stored on the sentence to bound memory.
            flair_sentence.clear_embeddings()
        # Keep only the mean vector (drop the count), as a numpy array.
        keyed_vecs = {
            key: np.array(vecs[0])
            for key, vecs in keyed_vecs.items()
        }
        # Discard tokens that ended up with empty vectors.
        keyed_vecs = {
            key: vecs
            for key, vecs in keyed_vecs.items() if len(vecs) != 0
        }
        # for key, vec in keyed_vecs.items():
        #     if len(vec) != 3072:
        #         print(key, len(vec))
        return Embeddings.to_gensim_binary(keyed_vecs)
Exemple #5
0
            topic,n = cmd_sub.recv()
            if "notify.eye_process.should_start" in topic:
                eye_id = n['eye_id']
                if not eyes_are_alive[eye_id].value:
                    Process(target=eye, name='eye{}'.format(eye_id), args=(
                            timebase,
                            eyes_are_alive[eye_id],
                            ipc_pub_url,
                            ipc_sub_url,
                            ipc_push_url,
                            user_dir,
                            app_version,
                            eye_id, glint_queue, glint_vector_queue
                            )).start()
            elif "notify.launcher_process.should_stop" in topic:
                break
            elif "notify.meta.should_doc" in topic:
                cmd_push.notify({
                    'subject':'meta.doc',
                    'actor':'launcher',
                    'doc':launcher.__doc__})

        for p in active_children(): p.join()


if __name__ == '__main__':
    # Required for multiprocessing support in frozen Windows executables;
    # a no-op everywhere else.
    freeze_support()
    if platform.system() == 'Darwin':
        # On macOS, start child processes with 'spawn' (fork is problematic
        # with some GUI/ML frameworks on Darwin).
        set_start_method('spawn')
    launcher()
Exemple #6
0
import model
import loss
from option import args
from checkpoint import Checkpoint
from trainer import Trainer


# Script entry: build data loader, model, loss and trainer from parsed args,
# then alternate train/test epochs until the trainer signals termination.
print("main scale >>"+str(args.scale[0]))
utility.set_seed(args.seed)
checkpoint = Checkpoint(args)

if checkpoint.ok:
    loader = data.Data(args)
    # NOTE(review): these assignments rebind the imported `model` and `loss`
    # modules to instances — intentional here, but shadows the module names.
    model = model.Model(args, checkpoint)
    # No loss function is needed in test-only mode.
    loss = loss.Loss(args, checkpoint) if not args.test_only else None
    t = Trainer(args, loader, model, loss, checkpoint)
    def main():
        while not t.terminate():
            t.train()
            t.test()

        checkpoint.done()

    if __name__ == '__main__':  # guard against re-execution on import
        freeze_support()  # required for multiprocessing in frozen Windows executables
        main()