Example #1
    def __init__(self, args):
        self.args = args

        data = Data(args.train_path, args.val_path, args.glove_path)
        data.build_vocab()
        train_data, val_data = data.input2tensor()
        embedding_matrix = data.build_embedding_matrix(args.embed_type,
                                                       args.embed_dim)
        train_dataset = MyDataset(train_data, data.max_len)
        val_dataset = MyDataset(val_data, data.max_len)

        self.train_dataloader = DataLoader(train_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=True)
        self.val_dataloader = DataLoader(val_dataset,
                                         batch_size=args.batch_size,
                                         shuffle=False)

        if args.model_type == 'CNN':
            self.model = CNNModel(args, data.vocab_size,
                                  embedding_matrix).to(args.device)
        else:
            self.model = LSTMNet(args, data.vocab_size,
                                 embedding_matrix).to(args.device)

        self.loss_func = nn.CrossEntropyLoss()
        self.optim = torch.optim.Adam(self.model.parameters(),
                                      lr=args.learning_rate)

        if torch.cuda.is_available():
            print('cuda memory allocated:',
                  torch.cuda.memory_allocated(device=args.device.index))
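A minimal sketch of a MyDataset compatible with the constructor calls above, assuming train_data is a (sequences, labels) pair of variable-length id lists that must be padded to max_len (the layout and pad_id=0 are assumptions, not from the source):

import torch
from torch.utils.data import Dataset

class MyDataset(Dataset):
    def __init__(self, data, max_len, pad_id=0):
        self.sequences, self.labels = data
        self.max_len = max_len
        self.pad_id = pad_id

    def __len__(self):
        return len(self.sequences)

    def __getitem__(self, idx):
        # truncate, then right-pad every sequence to a fixed max_len
        seq = list(self.sequences[idx])[:self.max_len]
        seq = seq + [self.pad_id] * (self.max_len - len(seq))
        return (torch.tensor(seq, dtype=torch.long),
                torch.tensor(self.labels[idx], dtype=torch.long))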
Example #2
def main():
    for t in ['ssA', 'ssB', 'slA', 'slB', 'llA', 'llB']:
        cnf.run_type = t
        if t in ['ssA', 'ssB']:
            cnf.max_seq_len = 128
        elif t in ['slA', 'slB']:
            cnf.max_seq_len = 1024
        elif t in ['llA', 'llB']:
            cnf.max_seq_len = 2048

        # model
        model = BERT().cuda()
        # model = ERNIE().cuda()
        print('Using model: {}'.format(model.__class__.__name__))
        # optimizer
        optimizer = optim.AdamW(model.parameters(), lr=cnf.lr)
        # scheduler = ReduceLROnPlateau(optimizer, mode='max', factor=0.1, patience=2, threshold=1e-2)
        # loss function
        criterion = nn.CrossEntropyLoss()
        # data loaders
        train_dataset = MyDataset(type=cnf.run_type, mode='train')
        train_dataloader = DataLoader(train_dataset, batch_size=cnf.batch_size, shuffle=True)
        evaluate_dataset = MyDataset(type=cnf.run_type, mode='evaluate')
        evaluate_dataloader = DataLoader(evaluate_dataset, batch_size=cnf.batch_size, shuffle=True)
        test_dataset = MyDataset(type=cnf.run_type, mode='test')
        test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False)

        train(model, optimizer, criterion, train_dataloader, evaluate_dataloader)
        predict(test_dataloader)
Example #3
def make_data_train_model(dataset_name, context, iterations=10000):
    print('Dataset: %s  Context: %s' % (dataset_name, context))
    context_emb_size = get_emb_size(dataset_name, context)
    exp_dir = os.path.join(expdir, dataset_name, context)

    train_df, val_df, test_df = get_data(dataset_name, context)
    word_vocab, category_vocab = make_vocabs(train_df, val_df, exp_dir)

    dataset = MyDataset(train_df,
                        word_vocab,
                        category_vocab,
                        max_len=max_len,
                        batch_size=batch_size)
    valdata = MyDataset(val_df,
                        word_vocab,
                        category_vocab,
                        max_len=max_len,
                        batch_size=batch_size)
    testdata = MyDataset(test_df,
                         word_vocab,
                         category_vocab,
                         max_len=max_len,
                         batch_size=batch_size)

    emb_matrix = get_emb_matrix(word_vocab, dataset_name)
    params = get_save_params(dataset_name, context, word_vocab, category_vocab,
                             context_emb_size, iterations)

    save_name = os.path.join(expdir, dataset_name, context, 'model.bin')
    train_model(dataset, valdata, params, save_name, emb_matrix)
    return testdata
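get_emb_matrix is not shown; a hedged sketch of the usual recipe behind such a helper, with the signature simplified to take the pretrained vectors directly (the real helper resolves them from dataset_name):

import numpy as np

def get_emb_matrix(word_vocab, pretrained_vectors, emb_dim=300):
    # one row per vocabulary id; words without a pretrained vector keep small random noise
    matrix = np.random.uniform(-0.05, 0.05, (len(word_vocab), emb_dim))
    for word, idx in word_vocab.items():
        if word in pretrained_vectors:
            matrix[idx] = pretrained_vectors[word]
    return matrix.astype(np.float32)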
Example #4
def main():
    train_list = make_datapath_list(phase="train")
    val_list = make_datapath_list(phase="val")

    # Dataset
    train_dataset = MyDataset(train_list,
                              transform=ImageTransform(resize, mean, std),
                              phase="train")
    val_dataset = MyDataset(val_list,
                            transform=ImageTransform(resize, mean, std),
                            phase="val")

    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=4,
                                                   shuffle=True)
    val_dataloader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size=4,
                                                 shuffle=False)

    dataloader_dict = {"train": train_dataloader, "val": val_dataloader}

    # Network
    use_pretrained = True
    net = models.vgg16(pretrained=use_pretrained)
    net.classifier[6] = nn.Linear(in_features=4096, out_features=2)

    # Loss
    criterior = nn.CrossEntropyLoss()

    # Optimizer
    # param_to_update = []

    # update_param_name = ["classifier.6.weight", "classifier.6.bias"]

    # for name, param in net.named_parameters():
    #     if name in update_param_name:
    #         param.requires_grad = True
    #         param_to_update.append(param)
    #         print(name)
    #     else:
    #         param.requires_grad = False
    params1, params2, params3 = param_to_update(net)

    optimizer = optim.SGD([
        {
            'params': params1,
            'lr': 1e-4
        },
        {
            'params': params2,
            'lr': 5e-4
        },
        {
            'params': params3,
            'lr': 1e-3
        },
    ],
                          momentum=0.9)

    train_model(net, dataloader_dict, criterior, optimizer, num_epochs)
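param_to_update is not shown, but the commented-out block above hints at what it does: select parameters and route them into groups. A hypothetical three-group split for this VGG-16 setup (the group boundaries are an assumption, chosen to match the three learning rates):

def param_to_update(net):
    # split parameters into three groups so optim.SGD can assign each its own lr:
    # convolutional features (lowest), classifier body, replaced classifier.6 (highest)
    params1, params2, params3 = [], [], []
    for name, param in net.named_parameters():
        param.requires_grad = True
        if name.startswith("features"):
            params1.append(param)
        elif name in ("classifier.6.weight", "classifier.6.bias"):
            params3.append(param)
        else:
            params2.append(param)
    return params1, params2, params3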
Example #5
def debug():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    epochs = 2
    net = MyCNN()
    train_path = C.train_path
    test_path = C.test_path

    train_ds = MyDataset(train_path)
    new_train_ds, validate_ds = dataset_split(train_ds, 0.8)
    test_ds = MyDataset(test_path, train=False)

    train_loader = dataloader(train_ds)
    new_train_loader = dataloader(new_train_ds)
    validate_loader = dataloader(validate_ds)
    test_loader = dataloader(test_ds)

    criterion = torch.nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    train(epochs, new_train_loader, device, net, criterion, optimizer)
    print("validate acc:", validate(validate_loader, device, net, criterion))
    submission(csv_path=C.csv_path,
               test_loader=test_loader,
               device=device,
               model=net)
    torch.save(net.state_dict(), C.model_save_path)
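dataset_split and dataloader are not shown; a minimal sketch of dataset_split via torch.utils.data.random_split, assuming the second argument is the training fraction:

import torch
from torch.utils.data import random_split

def dataset_split(dataset, ratio):
    n_train = int(len(dataset) * ratio)
    # returns (train_subset, validation_subset)
    return random_split(dataset, [n_train, len(dataset) - n_train])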
Example #6
    def loading_data(self):
        # (299,341)
        # (224,256)
        # normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        normalize = Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

        train_transformations = Compose([
            Resize((341, 461)),
            RandomResizedCrop(299),
            RandomHorizontalFlip(),
            RandomVerticalFlip(),
            RandomRotation(degrees=90),
            ToTensor(), normalize
        ])

        if self.tencrop:
            val_transformations = Compose([
                Resize((341, 461)),
                TenCrop(299),
                Lambda(lambda crops: torch.stack(
                    [ToTensor()(crop) for crop in crops])),
                Lambda(lambda normal: torch.stack(
                    [normalize(nor) for nor in normal]))
            ])
        else:
            val_transformations = Compose(
                [Resize((341, 461)),
                 CenterCrop(299),
                 ToTensor(), normalize])

        train_dataset = MyDataset(
            self.data,
            data_folder="train",
            name_list=self.name_list,
            version="1",
            transform=train_transformations,
        )

        val_dataset = MyDataset(
            self.data,
            data_folder="validation",
            name_list=self.name_list,
            version="1",
            transform=val_transformations,
        )

        train_loader = data.DataLoader(train_dataset,
                                       batch_size=self.batch_size,
                                       shuffle=True,
                                       num_workers=self.workers,
                                       pin_memory=True)

        val_loader = data.DataLoader(val_dataset,
                                     batch_size=4,
                                     shuffle=False,
                                     num_workers=self.workers,
                                     pin_memory=True)

        return (train_loader, val_loader)
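With tencrop enabled, each validation sample becomes a (10, C, H, W) stack, so val_loader yields (B, 10, C, H, W) batches. Evaluation code then has to fuse the crop dimension and average the ten predictions; a sketch of that standard pattern (evaluate_tencrop is not from this repo):

import torch

def evaluate_tencrop(model, val_loader, device="cuda"):
    model.eval()
    preds = []
    with torch.no_grad():
        for inputs, _ in val_loader:            # assumes (image, label) batches
            b, ncrops, c, h, w = inputs.size()
            out = model(inputs.view(-1, c, h, w).to(device))  # fuse batch and crop dims
            preds.append(out.view(b, ncrops, -1).mean(1))     # average over the 10 crops
    return torch.cat(preds)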
Example #7
def train(model, train_list, test_list):
    train_dataset = MyDataset(train_list)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=Config.batch_size,
                                  shuffle=True,
                                  num_workers=2,
                                  collate_fn=collate_fn,
                                  drop_last=True)
    model.train()

    # compute total_steps, the total number of parameter updates across all epochs
    total_steps = int(len(train_dataset) * Config.epochs /
                      Config.batch_size / Config.gradient_accumulation)
    print("total train step num: {}".format(total_steps))

    optimizer = BertAdam(model.parameters(),
                         lr=Config.lr,
                         warmup=0.05,
                         t_total=total_steps)
    print('start training...')
    # start training
    for epoch in range(Config.epochs):
        epoch_start_time = datetime.now()
        for batch_idx, input_ids in enumerate(train_dataloader):
            # Note: GPT-2's forward() generates one token for a given context, not a whole sequence.
            # Given n token ids, GPT2Model outputs n hidden states; the n-th hidden state predicts token n+1.
            input_ids = input_ids.to(Config.device)

            outputs = model.forward(input_ids=input_ids)

            loss, accuracy = calculate_loss_and_accuracy(outputs,
                                                         labels=input_ids)
            loss.backward()

            torch.nn.utils.clip_grad_norm_(model.parameters(),
                                           Config.max_grad_norm)

            optimizer.step()
            optimizer.zero_grad()
            print('epoch:{}, step:{}, loss: {:6f}, accuracy:{:6f}'.format(
                epoch + 1, batch_idx + 1, loss, accuracy))

        average_acc, average_loss = evaluate(model, test_list)
        res = "VALID epoch:{}, loss {:6f},  acc {:6f}".format(
            epoch, average_loss, average_acc)
        print(res)
        res += '\n'
        with open('log.txt', 'a+') as f:
            f.write(res)
        # save the model at the end of each epoch
        model_path = join(Config.model_output_path,
                          'model_epoch{}'.format(epoch + 1))
        if not os.path.exists(model_path):
            os.mkdir(model_path)
        model_to_save = model.module if hasattr(model, 'module') else model
        model_to_save.save_pretrained(model_path)
        epoch_finish_time = datetime.now()
        print('time taken for this epoch: {}'.format(epoch_finish_time -
                                                     epoch_start_time))
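calculate_loss_and_accuracy is not shown; a hedged sketch following the shift-by-one rule spelled out in the comments above (hidden state n predicts token n+1), assuming a transformers-style output tuple with logits first and pad id 0:

import torch.nn.functional as F

def calculate_loss_and_accuracy(outputs, labels, pad_id=0):
    logits = outputs[0]                           # (batch, seq_len, vocab)
    shift_logits = logits[:, :-1, :].contiguous()
    shift_labels = labels[:, 1:].contiguous()
    loss = F.cross_entropy(shift_logits.view(-1, shift_logits.size(-1)),
                           shift_labels.view(-1), ignore_index=pad_id)
    mask = shift_labels != pad_id                 # score only non-padding positions
    preds = shift_logits.argmax(-1)
    accuracy = (preds[mask] == shift_labels[mask]).float().mean()
    return loss, accuracy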
Example #8
def detect_test_data(model_path='weights/face_mask_weights.pth', num=20):
    yolo_detector = YOLO4_inference(model_path=model_path)
    test_data = MyDataset(test_root)
    for i in random.sample(range(len(test_data)), num):
        img, gt = test_data.getRaw(i)
        image = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        boxes, labels, scores = yolo_detector.predict(image)
        detect_test_data_show(image, gt, boxes, labels, scores)
Example #9
def main(args):
    canny = False
    writer = None
    if args.tensorboard:
        from tensorboardX import SummaryWriter
        writer = SummaryWriter()

    done_epoch = 0

    if args.model_type == "nin":
        from model_nin import MyModel
        model = MyModel()
    elif args.model_type == "tgn":
        from model_tgn import MyModel
        model = MyModel()
        canny = True
    elif args.model_type == "vgg":
        def loss(y, t):
            crossentropy = nn.CrossEntropyLoss()
            total_loss = crossentropy(y, t) 
            return total_loss
        model = models.vgg16(pretrained=True)
        model.classifier[0] = nn.Linear(8*8*512, 4096)
        model.classifier[6] = nn.Linear(4096, 27)
        model.loss = loss 

    if args.resume and os.path.exists(args.model_path):
        print("Resume training...")
        # model = Generator(Encorder(), Decoder())
        model.load_state_dict(torch.load(args.model_path))
        with open("{0}-history.csv".format(args.expname), 'r') as f:
            for i, l in enumerate(f):
                pass
            done_epoch = i
    else:
        print("Start training from scratch...")
        # model = Generator(Encorder(), Decoder())
        HEADER = ["loss_train", "loss_test", "accuracy"]
        logger = TrainLogger("{0}-history.csv".format(args.expname), header=HEADER, overwrite=True)
        del logger

    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)
    np.random.seed(SEED) 

    dataset_train = MyDataset(pickle_path=args.pickle_path, csv_path=args.train_list_path, root_dir=args.train_dir, canny=canny)
    dataset_test = MyDataset(pickle_path=args.pickle_path, csv_path=args.test_list_path, root_dir=args.test_dir, canny=canny)
    train_loader = DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    test_loader = DataLoader(dataset_test, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)

    device = torch.device("cpu" if args.no_cuda else "cuda:0")
    model = model.to(device)
    optimizer = opt.Adam(model.parameters(), lr=args.lr, betas=(0.5, 0.999))
    if args.resume:
        optimizer.load_state_dict(torch.load(args.optimizer_path))

    train(model=model, device=device, train_loader=train_loader, test_loader=test_loader,
          optimizer=optimizer, n_epochs=args.n_epochs, prefix=args.expname,
          done_epoch=done_epoch, path_checkpoint="checkpoint-{}".format(args.expname),
          writer=writer)
Example #10
def run(opt):
    # load the training set
    train_filename_list, train_label_list = data_preprocess.read_data(
        directory=opt.train_directory, dir2label_dict=opt.dir2label_dict)
    # define the data-augmentation transforms
    augmentation = data_preprocess.data_augmentation(opt.img_resize,
                                                     opt.img_random_crop,
                                                     mode='train')
    train_dataset = MyDataset(filenames=train_filename_list,
                              labels=train_label_list,
                              transform=augmentation)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               pin_memory=True)
    # # multi-scale variant below; the key change is the custom batch_sampler
    # train_loader = torch.utils.data.DataLoader(train_dataset,
    #                                            batch_size=opt.batch_size, shuffle=True,
    #                                            pin_memory=True,
    #                                            batch_sampler= BatchSampler(RandomSampler(train_dataset),
    #                                                                        batch_size=64,
    #                                                                        drop_last=True,
    #                                                                        multiscale_step=1,
    #                                                                        img_sizes=list(range(320, 608 + 1, 32))))

    # load the test set
    test_filename_list, test_label_list = data_preprocess.read_data(
        directory=opt.test_directory, dir2label_dict=opt.dir2label_dict)
    # note: the training augmentation defined above is reused for the test set
    test_dataset = MyDataset(filenames=test_filename_list,
                             labels=test_label_list,
                             transform=augmentation)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=opt.batch_size,
                                              shuffle=True,
                                              pin_memory=True)
    # multi-scale variant below; the key change is the custom batch_sampler
    # test_loader = torch.utils.data.DataLoader(test_dataset,
    #                                           batch_size=opt.batch_size, shuffle=True,
    #                                           pin_memory=True,
    #                                           batch_sampler= BatchSampler(RandomSampler(test_dataset),
    #                                                                       batch_size=64,
    #                                                                       drop_last=True,
    #                                                                       multiscale_step=1,
    #                                                                       img_sizes=list(range(320, 608 + 1, 32))))

    # val_dataset = MyDataset(filenames=val_filename_list, labels=val_label_list, transform=augmentation)
    # val_loader = torch.utils.data.DataLoader(val_dataset,
    #                                          batch_size=opt.batch_size, shuffle=True,
    #                                          pin_memory=True)

    # build the network
    net = get_pretrain_model(opt.model_name, opt.num_classes)

    # train on the training set, evaluate on the test set; no cross-validation, so split is set to 0
    train.train(net, 0, train_loader, test_loader, opt)
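The commented-out loaders refer to a custom multi-scale batch sampler. Note that DataLoader's batch_sampler option is mutually exclusive with batch_size, shuffle, and drop_last, so those arguments would have to be dropped. A hypothetical sketch of such a sampler, which yields (index, img_size) pairs and re-draws the size every multiscale_step batches (the dataset's __getitem__ must accept the pair):

import random
from torch.utils.data import BatchSampler

class MultiscaleBatchSampler(BatchSampler):
    def __init__(self, sampler, batch_size, drop_last,
                 multiscale_step=1, img_sizes=(416,)):
        super().__init__(sampler, batch_size, drop_last)
        self.multiscale_step = multiscale_step
        self.img_sizes = list(img_sizes)

    def __iter__(self):
        size = random.choice(self.img_sizes)
        batch, num_batches = [], 0
        for idx in self.sampler:
            batch.append((idx, size))           # dataset sees (index, target size)
            if len(batch) == self.batch_size:
                yield batch
                num_batches += 1
                batch = []
                if num_batches % self.multiscale_step == 0:
                    size = random.choice(self.img_sizes)
        if batch and not self.drop_last:
            yield batch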
Example #11
def test(model, net):
    #transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    # composed = transforms.Compose([Rescale(114), transforms.CenterCrop((112, 224),ToTensor(),ColorNormalize()])
    with torch.no_grad():
        # load the test set
        dataset = MyDataset(opt.video_path,
                            opt.val_list,
                            max_frame_len=opt.max_frame_len,
                            trainflag=False)
        # print the test-set size
        print('num_test_data:{}'.format(len(dataset.data)))
        # switch the model to evaluation mode
        model.eval()
        # build the data loader
        loader = dataset2dataloader(dataset, shuffle=False)
        loss_list = []
        cer = []
        crit = nn.CTCLoss(blank=0, reduction='mean')
        tic = time.time()
        for (i_iter, input) in enumerate(loader):
            # unpack the batch: video frames, target text, and their lengths
            video = input.get('video').to(device)
            txt = input.get('txt').to(device)
            vid_len = input.get('vid_len').to(device)
            txt_len = input.get('txt_len').to(device)

            y = net(video)

            loss = crit(
                y.transpose(0, 1).log_softmax(-1), txt, vid_len.view(-1),
                txt_len.view(-1)).detach().cpu().numpy()
            loss_list.append(loss)
            pred_txt = ctc_decode(y)

            truth_txt = [MyDataset.arr2txt(txt[_]) for _ in range(txt.size(0))]
            cer.extend(MyDataset.cer(pred_txt, truth_txt))
            if (i_iter % opt.display == 0):
                # estimated time remaining
                v = 1.0 * (time.time() - tic) / (i_iter + 1)
                eta = v * (len(loader) - i_iter) / 3600.0

                print(''.join(101 * '-'))
                print('{:<50}|{:>50}'.format('predict', 'truth'))
                print(''.join(101 * '-'))
                for (predict, truth) in list(zip(pred_txt, truth_txt))[:10]:
                    print('{:<50}|{:>50}'.format(predict, truth))
                print(''.join(101 * '-'))
                print('test_iter={},eta={},cer={}'.format(
                    i_iter, eta,
                    np.array(cer).mean()))
                print(''.join(101 * '-'))

        return (np.array(loss_list).mean(), np.array(cer).mean())
Example #12
def get_train_valid_dataset(data_dir):
	training_filenames, trainY = utils.load_train_filename_and_labels(data_dir)
	training_filenames, valid_filenames, trainY, validY = utils.train_test_split(training_filenames, trainY, split_ratio=0.1)
	
	trsfms = transforms.Compose([
				transforms.RandomCrop(256, pad_if_needed=True, padding_mode='symmetric'),
				transforms.RandomHorizontalFlip(),
				transforms.RandomRotation(15),
				transforms.ToTensor(),
				])
	return MyDataset(os.path.join(data_dir, 'training'), transforms=trsfms), MyDataset(os.path.join(data_dir, 'validation'), transforms=trsfms)
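Note that the random crop, flip, and rotation above are applied to the validation split as well. Validation is usually transformed deterministically; a sketch of a separate pipeline under that convention (sizes mirror the 256 crop above):

from torchvision import transforms

valid_trsfms = transforms.Compose([
    transforms.Resize(256),        # deterministic: no random crop/flip/rotation
    transforms.CenterCrop(256),
    transforms.ToTensor(),
])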
Example #13
def main():

    # compile
    device = torch.device(
        "cuda") if torch.cuda.is_available() else torch.device("cpu")
    badnet = BadNet().to(device)
    if os.path.exists("./models/badnet.pth"):
        badnet.load_state_dict(
            torch.load("./models/badnet.pth", map_location=device))
    criterion = nn.MSELoss()
    sgd = optim.SGD(badnet.parameters(), lr=0.001, momentum=0.9)
    epoch = 100

    # dataset
    train_data = datasets.MNIST(root="./data/", train=True, download=False)
    test_data = datasets.MNIST(root="./data/", train=False, download=False)
    train_data = MyDataset(train_data,
                           0,
                           portion=0.1,
                           mode="train",
                           device=device)
    test_data_orig = MyDataset(test_data,
                               0,
                               portion=0,
                               mode="train",
                               device=device)
    test_data_trig = MyDataset(test_data,
                               0,
                               portion=1,
                               mode="test",
                               device=device)
    train_data_loader = DataLoader(dataset=train_data,
                                   batch_size=64,
                                   shuffle=True)
    test_data_orig_loader = DataLoader(dataset=test_data_orig,
                                       batch_size=64,
                                       shuffle=True)
    test_data_trig_loader = DataLoader(dataset=test_data_trig,
                                       batch_size=64,
                                       shuffle=True)

    # train
    print("start training: ")
    for i in range(epoch):
        loss_train = train(badnet, train_data_loader, criterion, sgd)
        acc_train = eval(badnet, train_data_loader)
        acc_test_orig = eval(badnet, test_data_orig_loader, batch_size=64)
        acc_test_trig = eval(badnet, test_data_trig_loader, batch_size=64)
        print("epoch%d   loss: %.5f  training accuracy: %.5f  testing Orig accuracy: %.5f  testing Trig accuracy: %.5f"\
              % (i + 1, loss_train, acc_train, acc_test_orig, acc_test_trig))
        torch.save(badnet.state_dict(), "./models/badnet.pth")
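Here MyDataset's extra arguments (target label 0 and portion) suggest BadNets-style poisoning: portion=0.1 poisons 10% of the training set, while portion=1 triggers every test sample to measure attack success. A purely hypothetical sketch of the per-sample poisoning step (trigger shape, position, and value are guesses):

import torch

def poison(img, label, trigger_value=1.0):
    # img assumed to be a (C, H, W) float tensor; stamp a 4x4 trigger patch
    # in the bottom-right corner and relabel to the attack target class 0
    img = img.clone()
    img[..., -4:, -4:] = trigger_value
    return img, 0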
Example #14
    def train(self, round_limit=10):
        faceDataset = MyDataset(self.dataset_path)
        valDataset = MyDataset(self.valdataset_path)
        # rule of thumb: batch_size should generally not exceed 1% of the dataset size
        dataloader = DataLoader(faceDataset,
                                batch_size=128,
                                shuffle=True,
                                num_workers=4)
        valdataloader = DataLoader(valDataset,
                                   batch_size=32,
                                   shuffle=True,
                                   num_workers=4)
        acc_end = 0.7457
        epoch = 0
        round = 0
        while True:
            predict = torch.tensor([]).cuda()
            label = torch.tensor([]).cuda()
            for xs, ys in dataloader:
                feature, cls = self.net(xs.cuda())
                loss = self.loss_fn(torch.log(cls), ys.cuda())
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                print("epoch = {}, loss = {}".format(epoch, loss))
            # validation pass: collect predictions only, no gradient updates
            with torch.no_grad():
                for xs2, ys2 in valdataloader:
                    feature2, cls2 = self.net(xs2.cuda())
                    ys2 = ys2.to(self.device, dtype=torch.float32)
                    predict = torch.cat((predict, cls2), dim=0)
                    label = torch.cat((label, ys2), dim=0)
            epoch += 1
            print(torch.argmax(predict, dim=1), label, len(label))
            acc = torch.div(
                torch.sum(torch.argmax(predict, dim=1) == label).float(),
                len(label))
            if acc > acc_end:
                torch.save(self.net.state_dict(), self.save_path)
                acc_end = acc
                print("save success, acc updated to {}".format(acc))
                round = 0
            else:
                round += 1
                print("accuracy {} did not improve; best acc is still {} ({} rounds without improvement)".format(
                    acc, acc_end, round))
                if round >= round_limit:
                    print("final acc: {}".format(acc_end))
                    break
Example #15
def test(model, net):
    with torch.no_grad():
        dataset = MyDataset(opt.video_path, opt.anno_path, opt.val_list,
                            opt.vid_padding, opt.txt_padding, 'test')

        print('num_test_data:{}'.format(len(dataset.data)))
        model.eval()
        loader = dataset2dataloader(dataset, shuffle=False)
        loss_list = []
        wer = []
        cer = []
        crit = nn.CTCLoss()
        tic = time.time()
        for (i_iter, input) in enumerate(loader):
            vid = input.get('vid').cuda()
            txt = input.get('txt').cuda()
            vid_len = input.get('vid_len').cuda()
            txt_len = input.get('txt_len').cuda()

            y = net(vid)

            loss = crit(
                y.transpose(0, 1).log_softmax(-1), txt, vid_len.view(-1),
                txt_len.view(-1)).detach().cpu().numpy()
            loss_list.append(loss)
            pred_txt = ctc_decode(y)

            truth_txt = [
                MyDataset.arr2txt(txt[_], start=1) for _ in range(txt.size(0))
            ]
            wer.extend(MyDataset.wer(pred_txt, truth_txt))
            cer.extend(MyDataset.cer(pred_txt, truth_txt))
            if (i_iter % opt.display == 0):
                v = 1.0 * (time.time() - tic) / (i_iter + 1)
                eta = v * (len(loader) - i_iter) / 3600.0

                print(''.join(101 * '-'))
                print('{:<50}|{:>50}'.format('predict', 'truth'))
                print(''.join(101 * '-'))
                for (predict, truth) in list(zip(pred_txt, truth_txt))[:10]:
                    print('{:<50}|{:>50}'.format(predict, truth))
                print(''.join(101 * '-'))
                print('test_iter={},eta={},wer={},cer={}'.format(
                    i_iter, eta,
                    np.array(wer).mean(),
                    np.array(cer).mean()))
                print(''.join(101 * '-'))

        return (np.array(loss_list).mean(), np.array(wer).mean(),
                np.array(cer).mean())
Example #16
def main():
    train_list = make_datapath_list("train")
    val_list = make_datapath_list("val")

    # dataset
    train_dataset = MyDataset(train_list,
                              transform=ImageTransform(resize, mean, std),
                              phase="train")
    val_dataset = MyDataset(val_list,
                            transform=ImageTransform(resize, mean, std),
                            phase="val")

    # dataloader
    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size,
                                                   shuffle=True)
    val_dataloader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size,
                                                 shuffle=False)
    dataloader_dict = {"train": train_dataloader, "val": val_dataloader}

    # network
    use_pretrained = True
    net = models.vgg16(pretrained=use_pretrained)
    net.classifier[6] = nn.Linear(in_features=4096, out_features=2)

    # loss
    criterior = nn.CrossEntropyLoss()

    # optimizer
    params1, params2, params3 = params_to_update(net)
    optimizer = optim.SGD([
        {
            'params': params1,
            'lr': 1e-4
        },
        {
            'params': params2,
            'lr': 5e-4
        },
        {
            'params': params3,
            'lr': 1e-3
        },
    ],
                          momentum=0.9)

    # training
    train_model(net, dataloader_dict, criterior, optimizer, num_epochs)
Example #17
def main(mode):

    config = load_config()

    # set up TensorBoard
    time_now = datetime.now().isoformat()

    if not os.path.exists(config.RUN_PATH):
        os.mkdir(config.RUN_PATH)
    writer = SummaryWriter(log_dir=config.RUN_PATH)

    # random seeds
    torch.manual_seed(config.SEED)
    torch.cuda.manual_seed(config.SEED)
    np.random.seed(config.SEED)
    random.seed(config.SEED)

    # INIT GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(e) for e in config.GPU)
    if torch.cuda.is_available():
        config.DEVICE = torch.device("cuda")
        print('\nGPU IS AVAILABLE')
        torch.backends.cudnn.benchmark = True
    else:
        config.DEVICE = torch.device("cpu")

    net = UNet(1).to(config.DEVICE)

    optimizer = optim.Adam(net.parameters(), betas=(0.5, 0.999), lr=config.LR)
    # criterion = nn.CrossEntropyLoss()  # define the loss function
    criterion = nn.BCELoss()
    # load the dataset
    if mode == 1:

        train_dataset = MyDataset(config, config.TRAIN_PATH)
        len_train = len(train_dataset)
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=config.BATCH_SIZE, shuffle=True)
        iter_per_epoch = len(train_loader)
        train_(config, train_loader, net, optimizer, criterion, len_train,
               iter_per_epoch, writer)

    if mode == 2:

        test_dataset = MyDataset(config, config.TEST_PATH)
        test_loader = torch.utils.data.DataLoader(test_dataset,
                                                  batch_size=1,
                                                  shuffle=False)
        test(config, test_loader, net, criterion)
Example #18
def main():
    train_list = make_datapath_list("train")
    val_list = make_datapath_list("val")

    # dataset
    train_dataset = MyDataset(train_list, transform=ImageTransform(resize, mean, std), phase='train')
    val_dataset = MyDataset(val_list, transform=ImageTransform(resize, mean, std), phase='val')

    # dataloader
    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size, shuffle=True)
    val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size, shuffle=False)
    dataloader_dict = {"train": train_dataloader, "val": val_dataloader}

    # network
    use_pretrained = True
    net = models.vgg16(pretrained=use_pretrained)
    net.classifier[6] = nn.Linear(in_features=4096, out_features=2, bias=True)
    print(net)
    # set training mode
    net = net.train()

    # loss
    criterior = nn.CrossEntropyLoss()

    # optimizer
    # update only the desired parameters:
    # params_to_update = []
    # update_params_name = ["classifier.6.weight", "classifier.6.bias"]
    # for name, param in net.named_parameters():
    #     if name in update_params_name:
    #         param.requires_grad = True
    #         params_to_update.append(param)
    #         print(name)
    #     else:
    #         param.requires_grad = False
    params1, params2, params3 = params_to_update(net)
    # params: the parameter groups to update
    # lr: learning rate
    # momentum =
    optimizer = optim.SGD([
        {"params": params1, "lr": 1e-4},
        {"params": params2, "lr": 5e-4},
        {"params": params3, "lr": 1e-3}
    ], momentum=0.9)

    # training
    train_model(net, dataloader_dict, criterior, optimizer, num_epoch)
Example #19
    def loading_data(self):
        # normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        normalize = Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        if self.tencrop:
            val_transformations = Compose([
                Resize(341),
                TenCrop(299),
                Lambda(lambda crops: torch.stack(
                    [ToTensor()(crop) for crop in crops])),
                Lambda(lambda normal: torch.stack(
                    [normalize(nor) for nor in normal]))
            ])
        else:
            val_transformations = Compose(
                [Resize(341),
                 CenterCrop(299),
                 ToTensor(), normalize])

        test_dataset = MyDataset(
            self.data,
            name_list=self.name_list,
            data_folder="test",
            version="1",
            transform=val_transformations,
        )

        test_loader = data.DataLoader(test_dataset,
                                      batch_size=self.batch_size,
                                      shuffle=False,
                                      num_workers=self.workers,
                                      pin_memory=False)

        return test_loader
Example #20
    def train(
        self,
        data_path,
        criterion,
        max_epoch=50,
        batch_size=1,
        k_fold=False,
        n_k=5,
        n_discontinue=5,
    ):
        model = self.model
        model.train()
        dataset = MyDataset(data_path)
        len_dataset = len(dataset)
        train_test_indicies = createIndicies(len_dataset, k_fold)
        optimizer = torch.optim.Adam(model.parameters(), weight_decay=1e-5)
        scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)
        tb_writer = SummaryWriter(log_dir=self.log_dir)
        for train_indices, test_indices in train_test_indicies:  # n_k times
            split = DataSplit(
                dataset, train_indices, test_indices, val_train_split=0.1, device=DEVICE
            )
            train_loader, valid_loader, test_loader = split.get_split(
                batch_size, num_workers=4
            )
            loss_table, loss_list = "", []
            epoch_index = 1
            controller, condition = Controller(max_epoch, n_discontinue), True
            while condition:
                train_loss, valid_loss = trainOneEpoch(
                    model,
                    criterion,
                    train_loader,
                    valid_loader,
                    optimizer,
                    DEVICE,
                    epoch_index,
                    tb_writer,
                )
                loss_table = displayLoss(
                    loss_table, epoch_index, train_loss, valid_loss
                )
                loss_list.append([train_loss, valid_loss])
                scheduler.step()
                if epoch_index % EVAL_INTERVAL == 0:
                    PATH = "./cifar_net.pth"
                    torch.save(model.state_dict(), PATH)
                epoch_index, condition = controller.update(valid_loss)

            print("Finished Training")

            dat = np.array(loss_list)
            x = np.arange(dat.shape[0])
            plt.plot(x, dat)
            plt.savefig(
                "/n/work1/ooyama/plot_image/learning_curve"
                + str(datetime.datetime.now())
                + ".png"
            )
        tb_writer.close()
Example #21
def ctc_decode(y):
    y = y.argmax(-1)
    t = y.size(0)
    result = []
    for i in range(t+1):
        result.append(MyDataset.ctc_arr2txt(y[:i], start=1))
    return result
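MyDataset.ctc_arr2txt is not shown; a hedged sketch of the greedy CTC collapse it presumably performs, dropping repeats and treating indices below start as blank (the alphabet string is a placeholder):

def ctc_arr2txt(arr, start=1, letters="abcdefghijklmnopqrstuvwxyz "):
    out, prev = [], -1
    for n in arr:
        n = int(n)
        if n >= start and n != prev:   # skip blanks and repeated symbols
            out.append(letters[n - start])
        prev = n
    return ''.join(out)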
Example #22
def evaluate(model, device, test_list, multi_gpu, args):
    logger.info("start evaluating model")
    model.eval()
    # log with tensorboardX
    tb_writer = SummaryWriter(log_dir=args.writer_dir)
    test_dataset = MyDataset(test_list)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=True,
                                 num_workers=args.num_workers,
                                 collate_fn=collate_fn,
                                 drop_last=True)
    with torch.no_grad():
        for batch_idx, input_ids in enumerate(test_dataloader):
            input_ids = input_ids.to(device)
            outputs = model.forward(input_ids=input_ids)
            loss, accuracy = calculate_loss_and_accuracy(outputs,
                                                         labels=input_ids,
                                                         device=device)

            if multi_gpu:
                loss = loss.mean()
                accuracy = accuracy.mean()
            if args.gradient_accumulation > 1:
                loss = loss / args.gradient_accumulation
                accuracy = accuracy / args.gradient_accumulation
            logger.info("evaluate batch {} ,loss {} ,accuracy {}".format(
                batch_idx, loss, accuracy))
            # tb_writer.add_scalar('loss', loss.item(), overall_step)
        logger.info("finishing evaluating")
Example #23
def dataset_func():
    """
    Initialize Custom Dataset
    :return: Initialized Custom Dataset
    """
    # Dataset Should Return tuple(image(tensor(width,height,3)), label(tensor)))
    return MyDataset()
Example #24
def train_model(config):

    data_transforms = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(0.5, 0.5)])
    my_dataset = MyDataset("session-2/data/data/data/",
                           "session-2/data/chinese_mnist.csv",
                           transform=data_transforms)
    train_dataset, val_dataset, test_dataset = torch.utils.data.random_split(
        my_dataset, [10000, 2500, 2500])
    train_loader = DataLoader(train_dataset,
                              batch_size=config["batch_size"],
                              shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=config["batch_size"])
    test_loader = DataLoader(test_dataset, batch_size=config["batch_size"])

    my_model = MyModel(config["h1"], config["h2"], config["h3"],
                       config["h4"]).to(device)

    optimizer = optim.Adam(my_model.parameters(), config["lr"])
    for epoch in range(config["epochs"]):
        loss, acc = train_single_epoch(my_model, train_loader, optimizer)
        print(f"Train Epoch {epoch} loss={loss:.2f} acc={acc:.2f}")
        loss, acc = eval_single_epoch(my_model, val_loader)
        print(f"Eval Epoch {epoch} loss={loss:.2f} acc={acc:.2f}")

    loss, acc = eval_single_epoch(my_model, test_loader)
    print(f"Test loss={loss:.2f} acc={acc:.2f}")

    return my_model
Example #25
File: train.py  Project: ttaa9/PRIN
def load_train_set(rand_rot=False, aug=True):

    # load data
    names = ['ply_data_train0.h5', 'ply_data_train1.h5', 'ply_data_train2.h5',
             'ply_data_train3.h5', 'ply_data_train4.h5', 'ply_data_train5.h5',
             'ply_data_val0.h5']
    f = [h5py.File(os.path.join(data_path, name), 'r') for name in names]

    data = f[0]['data'][:]
    label = f[0]['label'][:]
    seg = f[0]['pid'][:]

    for i in range(1, 7):
        data = np.concatenate((data, f[i]['data'][:]), axis=0)
        label = np.concatenate((label, f[i]['label'][:]), axis=0)
        seg = np.concatenate((seg, f[i]['pid'][:]), axis=0)

    for ff in f:
        ff.close()

    print(data.shape, label.shape, seg.shape)
    return MyDataset(data, label, seg, rand_rot=rand_rot, aug=aug)
Example #26
def evaluate(model, device, test_list, multi_gpu, args):
    model_path = join(args.model_output_path, "saved.pt")
    if os.path.exists(model_path):
        checkpoint = torch.load(model_path)
        model.load_state_dict(checkpoint['model'])
    logger.info("start evaluating model")
    model.eval()
    test_dataset = MyDataset(test_list)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=True,
                                 num_workers=args.num_workers,
                                 collate_fn=collate_fn)
    with torch.no_grad():
        for batch_idx, input_ids in enumerate(test_dataloader):
            input_ids = input_ids.to(device)
            mu, logvar, bow_probs = model.forward(input=input_ids)
            bow_loss = calculate_bow(bow_probs, input_ids, device)

            loss = bow_loss

            if multi_gpu:
                loss = loss.mean()
            if args.gradient_accumulation > 1:
                loss = loss / args.gradient_accumulation
            logger.info("evaluate batch {}, loss {:.6}".format(
                batch_idx, loss))
        logger.info("finishing evaluating")
Example #27
def evaluate(model, device, test_list, args):
    model_path = join(args.model_output_path, "saved.pt")
    if os.path.exists(model_path):
        checkpoint = torch.load(model_path)
        model.load_state_dict(checkpoint['model'])
    logger.info("start evaluating model")
    model.eval()
    test_dataset = MyDataset(test_list)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=True,
                                 num_workers=args.num_workers,
                                 collate_fn=collate_fn)
    with torch.no_grad():
        for batch_idx, input_ids in enumerate(test_dataloader):
            input_ids = input_ids.to(device)
            outputs, mu, logvar, bow_probs = model.forward(input=input_ids)
            # anneal_function, step, k, x0
            ce, accuracy = calculate_loss_and_accuracy(outputs,
                                                       labels=input_ids,
                                                       device=device)
            kld = (-0.5 *
                   torch.sum(logvar - torch.pow(mu, 2) - torch.exp(logvar) + 1,
                             1)).mean().squeeze()

            bow_loss = calculate_bow(bow_probs, input_ids, device)

            loss = ce + 0.5 * kld + args.bow_weight * bow_loss

            logger.info(
                "evaluate batch {}, ce {:.6}, kld {:.6}, bow {:.6}, loss {:.6}, accuracy {:.6}"
                .format(batch_idx, ce, kld, bow_loss, loss, accuracy))
        logger.info("finishing evaluating")
Example #28
def test_5_possibility(root):

    test_data = MyDataset(txt=root+'label/simple_test.txt', transform=transforms.ToTensor())
    test_loader = DataLoader(dataset=test_data, batch_size=64)
    
    model = Net()
    model.load_state_dict(torch.load('weight/result.pkl'))
    if(use_gpu):
        model = model.cuda()      
    model.eval()
    
    # Variable(..., volatile=True) is the deprecated pre-0.4 API; torch.no_grad() replaces it
    with torch.no_grad():
        for batch_x, batch_y in test_loader:
            if(use_gpu):
                batch_x, batch_y = batch_x.cuda(), batch_y.cuda()
            out = model(batch_x)
            if(use_gpu):
                out = out.cpu()
            out_array = out.numpy()
            for i in range(len(out_array)):
                # clamp negatives to zero, then renormalize each row to sum to 1
                for j in range(len(out_array[i])):
                    if out_array[i][j] < 0:
                        out_array[i][j] = 0
                total = sum(out_array[i])
                for j in range(len(out_array[i])):
                    out_array[i][j] /= total

                print(out_array[i])
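The clamp-negatives-and-renormalize loop above hand-rolls a probability estimate from raw outputs. If the network emits logits, torch.softmax is the standard alternative (a sketch, not an exact equivalent of the loop):

import torch

def to_probabilities(out):
    # each row sums to 1, replacing the manual clamp-and-renormalize
    return torch.softmax(out, dim=1)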
Example #29
    def train(self):
        self.model.train()
        best_acc = 0.0
        save_model_prefix = os.path.join(self.model_path,
                                         self.config.model_prefix)
        for epoch in range(self.num_epochs):
            self.logger.info("Epoch %d/%d" % (epoch + 1, self.num_epochs))
            start_time = time.time()
            for batch in self.train_data_loader:
                output = self.model(MyDataset.to(batch, self.config.device))
                self.model.zero_grad()
                loss = self._calc_loss(output, batch)
                loss.backward()
                self.optimizer.step()
                self.writer.add_scalar("scalar/loss", loss.cpu().item(), epoch)

            time_diff = time.time() - start_time
            self.logger.info("epoch %d time consumed: %dm%ds." %
                             (epoch + 1, time_diff // 60, time_diff % 60))
            # evaluate model
            cur_acc = self.eval_dev(self.dev_data_loader)
            self.model.train()
            self.logger.info("Current accuracy: %.3f" % cur_acc)
            self.writer.add_scalar("scalar/accuracy", cur_acc)
            if cur_acc > best_acc:  # and epoch > 10:
                save_filename = save_model_prefix + str(cur_acc)
                torch.save(self.model.state_dict(), save_filename)
                best_acc = cur_acc
Example #30
def test_5_class(root):     
    
    correct = [0, 0, 0, 0, 0]
    
    test_data = MyDataset(txt=root+'label/mytest.txt', transform=transforms.ToTensor())
    test_loader = DataLoader(dataset=test_data, batch_size=64)
    
    test_num_list = [0, 0, 0, 0, 0]
    with open(root + 'label/mytest.txt', 'r') as file_read:
        file_list = file_read.readlines()
    for i in range(len(file_list)):
        index = int(file_list[i].split(' ')[1])
        test_num_list[index] += 1
    
    model = Net()
    model.load_state_dict(torch.load('weight/result.pkl'))
    if(use_gpu):
        model = model.cuda()
    model.eval()
    
    # Variable(..., volatile=True) is the deprecated pre-0.4 API; torch.no_grad() replaces it
    with torch.no_grad():
        for batch_x, batch_y in test_loader:
            if(use_gpu):
                batch_x, batch_y = batch_x.cuda(), batch_y.cuda()
            out = model(batch_x)
            pred = torch.max(out, 1)[1]
            for i in range(len(pred)):
                if(pred[i].item() == batch_y[i].item()):
                    correct[pred[i].item()] += 1
    
    for i in range(len(correct)):
        correct[i] /= test_num_list[i]
    print(correct)
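The same per-class accuracy can be computed without Python-level loops; a sketch using torch.bincount, assuming the 5 classes above (classes absent from the test set would divide by zero):

import torch

def per_class_accuracy(model, loader, num_classes=5):
    correct = torch.zeros(num_classes)
    total = torch.zeros(num_classes)
    with torch.no_grad():
        for batch_x, batch_y in loader:
            pred = model(batch_x).argmax(dim=1).cpu()
            batch_y = batch_y.cpu()
            hit = pred == batch_y
            correct += torch.bincount(batch_y[hit], minlength=num_classes).float()
            total += torch.bincount(batch_y, minlength=num_classes).float()
    return (correct / total).tolist()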