Code Example #1
File: MSDN.py Project: nguyendu394/Perdestrian-
def test(pretrain):
    print('TESTING MSDN...')
    full_transform=transforms.Compose([ToTensor(),
                                       Normalize(cfg.BGR_MEAN,cfg.BGR_STD)])

    params = {'batch_size': 1,
              'shuffle':False,
              'num_workers':cfg.TRAIN.NUM_WORKERS}
    print(params)
    print('score thresh: {}'.format(cfg.TEST.THRESS))

    my_dataset = MyDataset(imgs_csv=cfg.TEST.IMGS_CSV, rois_csv=cfg.TEST.ROIS_CSV,
                           root_dir=cfg.TEST.ROOT_DIR, ther_path=cfg.TEST.THERMAL_PATH,
                           transform=full_transform, train=False)
    print(len(my_dataset))
    dataloader = DataLoader(my_dataset, **params)

    MSDN_net = MyMSDN()
    MSDN_net.to(cfg.DEVICE)

    print('pretrain: ' + pretrain)
    MSDN_net.load_state_dict(torch.load(pretrain))

    running_loss = 0.0
    st = time.time()

    test_file = 'mymodel/MSDN/test1/test70_{}.txt'.format(pretrain.split('/')[-1])
    f = open(test_file,'a')
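
Every example on this page assumes a project-specific MyDataset. The implementations differ, but each satisfies the same torch.utils.data.Dataset contract that DataLoader relies on; a minimal sketch (constructor and field names hypothetical, not from any project below):

import torch
from torch.utils.data import Dataset

class MyDataset(Dataset):
    # Index-based access plus a length: the whole contract DataLoader needs.
    def __init__(self, samples, labels, transform=None):
        self.samples = samples      # e.g. arrays, tensors, or file paths
        self.labels = labels
        self.transform = transform  # optional per-item callable

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        x, y = self.samples[idx], self.labels[idx]
        if self.transform is not None:
            x = self.transform(x)
        return x, y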
Code Example #2
File: train.py Project: by929/sgdet-gat
def train(epoch, pos_weight = None, batch_size = 100):
    t = time.time()
    model.train()

    train_feat_files, train_edge_files = generate_filenames('train')
    train_dataset = MyDataset('train', train_feat_files, train_edge_files)
    
    train_iter = Data.DataLoader(dataset = train_dataset, batch_size = 1)
    img_num = 1000  # expected number of training images; used to flush the final partial batch
    train_output = None
    train_labels = None

    for img_id, (x, y) in enumerate(train_iter):
        # print(img_id)
        x = x[0].numpy()
        y = y[0].numpy()
        adj, features, labels, idx_train = load_data(x, y)

        if args.cuda:
            features = features.cuda()
            adj = adj.cuda()
            labels = labels.cuda()
            idx_train = idx_train.cuda()

        # Variable is a legacy wrapper (a no-op on modern PyTorch); kept as in the source
        features, adj, labels = Variable(features), Variable(adj), Variable(labels)
        output = model(features, adj)
        idx_train = sample_training(labels, idx_train)

        if train_output is None:
            train_output = output[idx_train]
            train_labels = labels[idx_train]
        else:
            train_output = torch.cat((train_output, output[idx_train]), 0)
            train_labels = torch.cat((train_labels, labels[idx_train]), 0)

        if (img_id+1) % batch_size == 0 or (img_id+1) == img_num:
            if pos_weight is None:
                loss_train = F.nll_loss(train_output, train_labels)
            else:
                loss_train = F.nll_loss(train_output, train_labels, weight=pos_weight)
            
            loss_data = loss_train.data.item()
            acc_train, recall_bg, recall_nobg, precision_bg, precision_nobg \
                        = evaluation_train(train_output, train_labels)
            
            optimizer.zero_grad()
            loss_train.backward()
            optimizer.step()
            train_output = None
            train_labels = None

            print('Epoch: {:04d}'.format(epoch+1),
                  'loss_train: {:.6f}'.format(loss_data),
                  'acc_train: {:.6f}'.format(acc_train.data.item()),
                  'recall_bg: {:.6f}'.format(recall_bg.data.item()),
                  'recall_nobg: {:.6f}'.format(recall_nobg.data.item()),
                  'precision_bg: {:.6f}'.format(precision_bg.data.item()),
                  'precision_nobg: {:.6f}'.format(precision_nobg.data.item()),
                  'time: {:.4f}s'.format(time.time() - t))
    return loss_data
Code Example #3
    def __init__(self, args):
        # parameters
        self.epoch = args.epoch
        self.sample_num = 100
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type
        self.input_size = args.input_size
        self.pre_train_model = args.model
        self.output_dim = 8
        self.z_dim = 100
        self.max_length = 64
        self.lambda_ = 10
        self.n_critic = 5  # the number of iterations of the critic per generator iteration

        # load dataset (note: this rebinds self.dataset, which held args.dataset above)
        self.dataset = MyDataset()
        self.data_loader = DataLoader(self.dataset,
                                      batch_size=self.batch_size,
                                      shuffle=True)

        self.iter = iter(self.data_loader)

        # networks init
        self.G = G(self.gpu_mode)
        self.D = D(self.gpu_mode)
        self.G_optimizer = optim.Adam(self.G.parameters(),
                                      lr=args.lrG,
                                      betas=(args.beta1, args.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(),
                                      lr=args.lrD,
                                      betas=(args.beta1, args.beta2))

        if self.pre_train_model:
            dic_D = torch.load(self.pre_train_model)
            dic_G = torch.load(
                self.pre_train_model[:self.pre_train_model.rfind('D')] +
                'G.pth')
            self.G.load_state_dict(dic_G['state_dict'])
            self.G_optimizer.load_state_dict(dic_G['optimizer'])
            self.D.load_state_dict(dic_D['state_dict'])
            self.D_optimizer.load_state_dict(dic_D['optimizer'])

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('-----------------------------------------------')

        # fixed noise
        self.sample_z_ = torch.rand((self.batch_size, self.z_dim))
        if self.gpu_mode:
            self.sample_z_ = self.sample_z_.cuda()
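
lambda_ and n_critic above are the usual WGAN-GP hyperparameters: the gradient-penalty weight and the number of critic updates per generator update. The training step itself is not part of the excerpt; a sketch of the penalty term those names suggest, assuming standard WGAN-GP (not taken from this project):

import torch

def gradient_penalty(D, real, fake, lambda_=10):
    # Sample points on straight lines between real and fake batches
    shape = [real.size(0)] + [1] * (real.dim() - 1)
    alpha = torch.rand(shape, device=real.device)
    interp = (alpha * real + (1 - alpha) * fake).requires_grad_(True)
    d_out = D(interp)
    grads = torch.autograd.grad(outputs=d_out, inputs=interp,
                                grad_outputs=torch.ones_like(d_out),
                                create_graph=True)[0]
    # Penalize the critic's gradient norm for straying from 1
    grad_norm = grads.view(grads.size(0), -1).norm(2, dim=1)
    return lambda_ * ((grad_norm - 1) ** 2).mean()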
Code Example #4
File: test.py Project: hanjuTsai2/ML2019SPRING
def main():
    x_test = LoadData(sys.argv[1])
    
    val_transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.CenterCrop(44),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0],
                             std=[255])
    ])
    batch_size = 128
    test_set = MyDataset(x_test, None, val_transform)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=8)
    
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)
    model = Classifier().to(device)
    model = loading_compression(model)

    model.eval()
    ans = np.array([])
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        for i, data in enumerate(test_loader):
            pred = model(data.to(device))
            ans = np.append(ans, np.argmax(pred.data.cpu().numpy(), axis=1))
    
    ans = ans.astype(int)
    df = [[cnt,i] for cnt, i in enumerate(ans)]
    df = pd.DataFrame(df, columns=['id', 'label'])
    df.to_csv(sys.argv[2], index=None)
Code Example #5
File: train.py Project: flowerrin/Transformer
def result_write(transformer, n):
    data_w = open("result.txt", "w")
    
    pair = pairs_devtest
    inputs, outputs = map(list, zip(*pair))
    test_pairs = [tensorsFromPair(inputs[i].split(" "), outputs[i].split(" "))
                  for i in range(n)]
    
    dataset = MyDataset(test_pairs)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=50, shuffle=False, num_workers=0, collate_fn=my_collate_fn)
    
    # idx : [inputs, targets], [ip_len, op_len]
    # 1iter : [batch_size, length]
    # batch=96, text_size=1784
    for idx in tqdm(dataloader, ascii=True):
        batch = len(idx[0][0])
        test_pair = idx[0]
        lengths = idx[1]
        op_len = lengths[1]
        
        input_tensor = torch.tensor(test_pair[0], dtype=torch.long, device=device)
        
        output_words = evaluate(transformer, input_tensor, batch)
        
        for s in output_words:
            if len(s) != 0:
                data_w.write(s+"\n")
            else:
                data_w.write("\n")
        
    data_w.close()
Code Example #6
def get_loaders(dataroot, val_batch_size, train_batch_size, input_size,
                workers):
    val_data = MyDataset(txt_path=os.path.join(dataroot, 'valid_label1.txt'),
                         transform=get_transform(False, input_size))
    val_loader = torch.utils.data.DataLoader(val_data,
                                             batch_size=val_batch_size,
                                             shuffle=False,
                                             num_workers=workers,
                                             pin_memory=True)

    train_data = MyDataset(txt_path=os.path.join(dataroot, 'train_label1.txt'),
                           transform=get_transform(input_size=input_size))
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=train_batch_size,
                                               shuffle=True,
                                               num_workers=workers,
                                               pin_memory=True)

    return train_loader, val_loader
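
Usage is a single call; the argument values here are hypothetical:

train_loader, val_loader = get_loaders(dataroot='./data',
                                       val_batch_size=64,
                                       train_batch_size=128,
                                       input_size=224,
                                       workers=4)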
Code Example #7
File: train.py Project: flowerrin/Transformer
def Validation(transformer, n, epoch):
    all_BLEUscore = 0
    new_n = int(n/max_epoch)
    start = 0
    stop = n
    
    with torch.no_grad():
        pair = pairs_dev
        inputs, outputs = map(list, zip(*pair))
        dev_pairs = [tensorsFromPair(inputs[i].split(" "), outputs[i].split(" "))
                     for i in range(n)]
        dataset = MyDataset(dev_pairs)
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=50, shuffle=False, num_workers=0, collate_fn=my_collate_fn)
        
        cnt = 0
        res = []
        hy = []
        chencherry = bleu_score.SmoothingFunction()
        for idx in tqdm(dataloader, ascii=True):
            batch = len(idx[0][0])
            dev_pair = idx[0]
            
            input_tensor = torch.tensor(dev_pair[0], dtype=torch.long, device=device)
            re_tensor = torch.tensor(dev_pair[1], dtype=torch.long, device=device)
            
            output_words = evaluate(transformer, input_tensor, batch, 0)
            #output_words = evaluate(transformer, input_tensor, batch, -1)  # debug
            
            for i in range(len(output_words)):
                re = outputs[cnt].split(" ")
                res.append([ re ])
                if len(output_words[i]) != 0:   # empty-string check
                    hy.append(output_words[i].split(" "))
                else:
                    hy.append([""])
                
                cnt += 1
                
        #all_BLEUscore += bleu_score.corpus_bleu(res, hy, smoothing_function=chencherry.method4)
        all_BLEUscore += bleu_score.corpus_bleu(res, hy)
        all_BLEUscore *= 100
        
    return all_BLEUscore
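
corpus_bleu expects one list of references per hypothesis, which is why each entry appended to res is wrapped in an extra list. A toy call showing the expected shapes:

from nltk.translate import bleu_score

refs = [[['the', 'cat', 'sat', 'on', 'the', 'mat']],      # one reference list per sentence
        [['hello', 'world', 'this', 'is', 'a', 'test']]]
hyps = [['the', 'cat', 'sat', 'on', 'the', 'mat'],
        ['hello', 'there', 'this', 'is', 'a', 'test']]
print(bleu_score.corpus_bleu(refs, hyps) * 100)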
Code Example #8
File: train.py Project: by929/sgdet-gat
def compute_test(epoch, pos_weight = None):
    model.eval()

    test_feat_files, test_edge_files = generate_filenames('test')
    test_dataset = MyDataset('test', test_feat_files, test_edge_files)

    test_iter = Data.DataLoader(dataset = test_dataset, batch_size = 1)
    test_output = None
    test_labels = None

    for _, (x, y) in enumerate(test_iter):
        x = x[0].numpy()
        y = y[0].numpy()
        adj, features, labels, idx_test = load_data(x, y)

        if args.cuda:
            features = features.cuda()
            adj = adj.cuda()
            labels = labels.cuda()
            idx_test = idx_test.cuda()

        output = model(features, adj)
        preds = output[idx_test].max(1)[1].type_as(labels)
        if test_output is None:
            test_output = preds
            test_labels = labels[idx_test]
        else:
            test_output = torch.cat((test_output, preds), 0)
            test_labels = torch.cat((test_labels, labels[idx_test]), 0)

    acc_test, recall_bg, recall_nobg, precision_bg, precision_nobg \
                    = evaluation_test(test_output, test_labels)
    
    print('Epoch: {:04d}'.format(epoch+1),
          'acc_test: {:.6f}'.format(acc_test.data.item()),
          'recall_bg: {:.6f}'.format(recall_bg.data.item()),
          'recall_nobg: {:.6f}'.format(recall_nobg.data.item()),
          'precision_bg: {:.6f}'.format(precision_bg.data.item()),
          'precision_nobg: {:.6f}'.format(precision_nobg.data.item()))
Code Example #9
File: read_data.py Project: Susie0731/pytorch_learn
        if word not in word_dict.values():  # word_dict maps index -> word, so check the values
            word_dict[len(word_dict)] = word
        vec_str = data_pair[1]
        tmp = vec_str.replace('\t',
                              '').strip('\n').strip('[').strip(']').split(', ')
        vec = [round(float(i), 5) for i in tmp]
        # for i in tmp:
        #    print(float(i))
        word_vec_dict[word] = vec
    return word_dict, word_vec_dict


word_dict, word_vec_dict = read_data_vec(filename)
#vecs = list(word_vec_dict.values())
vecs = [torch.FloatTensor(i) for i in word_vec_dict.values()]
dataset = MyDataset(vecs, vecs)
train_loader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)

test_data = vecs[:10]


class AutoEncoder(nn.Module):
    def __init__(self, embdding_size):
        super(AutoEncoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(embdding_size, 58),
            nn.Tanh(),
            nn.Linear(58, 29),
            nn.Tanh(),
            nn.Linear(29, 15),
            #nn.Tanh(),
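            # (the excerpt is truncated here; a mirrored decoder would plausibly
            #  follow -- a sketch under that assumption, not the project's code:)
        )
        self.decoder = nn.Sequential(
            nn.Linear(15, 29),
            nn.Tanh(),
            nn.Linear(29, 58),
            nn.Tanh(),
            nn.Linear(58, embdding_size),
        )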
Code Example #10
File: main.py Project: xuxiaobogit/GraphGR
def main(opt):
    if opt.manual_seed is None:
        opt.manual_seed = random.randint(1, 10000)
    # seed every RNG source, whether the seed was supplied or auto-generated
    print('Random Seed: ', opt.manual_seed)
    random.seed(opt.manual_seed)
    torch.manual_seed(opt.manual_seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(opt.manual_seed)

    model = Net(num_classes=opt.num_classes,
                num_layers=opt.num_layers,
                feat_dim=opt.feat_dim,
                embed_dim=opt.embed_dim,
                jk_layer=opt.jk_layer,
                process_step=opt.process_step,
                dropout=opt.dropout)
    print(model)
    model = model.to(device)
    criterion = torch.nn.CrossEntropyLoss()

    df = pd.read_csv(opt.lpath)
    nfold = len(df['group'].unique())

    dataset = MyDataset(opt.path)

    for k in range(nfold):
        trainings = df[df['group'] != k + 1]['instance'].tolist()
        validations = df[df['group'] == k + 1]['instance'].tolist()
        total = [f[:-3] for f in os.listdir(os.path.join(opt.path, 'raw'))]
        train_ids = [total.index(x) for x in trainings]
        val_ids = [total.index(x) for x in validations]

        train_ids = torch.tensor(train_ids, dtype=torch.long)
        val_ids = torch.tensor(val_ids, dtype=torch.long)
        train_dataset = dataset[train_ids]
        val_dataset = dataset[val_ids]
        train_loader = DataLoader(train_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=True,
                                  drop_last=True)
        val_loader = DataLoader(val_dataset,
                                batch_size=opt.batch_size,
                                drop_last=True)

        tr_losses = np.zeros((opt.num_epochs, ))
        tr_accs = np.zeros((opt.num_epochs, ))
        val_losses = np.zeros((opt.num_epochs, ))
        val_accs = np.zeros((opt.num_epochs, ))

        model.reset_parameters()
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=opt.lr,
                                     weight_decay=opt.weight_decay)
        # optimizer = torch.optim.SGD(model.parameters(), lr=opt.lr, momentum=0.9, weight_decay=opt.weight_decay, nesterov=True)
        best_val_loss = 1e6

        print('===================Fold {} starts==================='.format(k +
                                                                            1))
        for epoch in range(opt.num_epochs):
            s = time.time()

            model.train()
            losses = 0
            acc = 0

            for i, data in enumerate(train_loader):
                data = data.to(device)
                optimizer.zero_grad()
                output = model(data)
                loss = criterion(output, data.y.squeeze())
                loss.backward()
                optimizer.step()

                y_true = data.y.squeeze().cpu().numpy()
                y_pred = output.data.cpu().numpy().argmax(axis=1)
                acc += accuracy_score(y_true, y_pred) * 100
                losses += loss.data.cpu().numpy()

            tr_losses[epoch] = losses / (i + 1)
            tr_accs[epoch] = acc / (i + 1)

            model.eval()
            v_losses = 0
            v_acc = 0
            y_preds = []
            y_trues = []

            for j, data in enumerate(val_loader):
                data = data.to(device)
                with torch.no_grad():
                    output = model(data)
                    loss = criterion(output, data.y.squeeze())

                y_pred = output.data.cpu().numpy().argmax(axis=1)
                y_true = data.y.squeeze().cpu().numpy()
                y_trues += y_true.tolist()
                y_preds += y_pred.tolist()
                v_acc += accuracy_score(y_true, y_pred) * 100
                v_losses += loss.data.cpu().numpy()

            cnf = confusion_matrix(y_trues, y_preds)
            val_losses[epoch] = v_losses / (j + 1)
            val_accs[epoch] = v_acc / (j + 1)

            current_val_loss = v_losses / (j + 1)
            if current_val_loss < best_val_loss:
                best_val_loss = current_val_loss
                torch.save(
                    model.state_dict(),
                    os.path.join(output_path,
                                 'best_model_fold{}.ckpt'.format(k + 1)))

            print(
                'Epoch: {:03d} | time: {:.4f} seconds\n'
                'Train Loss: {:.4f} | Train accuracy {:.4f}\n'
                'Validation Loss: {:.4f} | Validation accuracy {:.4f} | Best {:.4f}'
                .format(epoch + 1,
                        time.time() - s, losses / (i + 1), acc / (i + 1),
                        v_losses / (j + 1), v_acc / (j + 1), best_val_loss))
            print('Validation confusion matrix:')
            print(cnf)

        print('===================Fold {} ends==================='.format(k +
                                                                          1))
        np.save(os.path.join(log_path, 'train_loss_{}.npy'.format(k + 1)),
                tr_losses)
        np.save(os.path.join(log_path, 'train_acc_{}.npy'.format(k + 1)),
                tr_accs)
        np.save(os.path.join(log_path, 'val_loss_{}.npy'.format(k + 1)),
                val_losses)
        np.save(os.path.join(log_path, 'val_acc_{}.npy'.format(k + 1)),
                val_accs)
Code Example #11
File: train_me.py Project: cyj407/protonet-face
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--max-epoch', type=int, default=200)
    parser.add_argument('--save-epoch', type=int, default=50)
    parser.add_argument('--shot', type=int, default=5)
    parser.add_argument('--query', type=int, default=1)
    parser.add_argument('--way', type=int, default=21)
    parser.add_argument('--save-path', default='./save/proto-me-200')
    parser.add_argument('--gpu', default='0')
    args = parser.parse_args()
    pprint(vars(args))

    set_gpu(args.gpu)
    ensure_path(args.save_path)

    trainset = MyDataset('train', './data20_me/')
    train_sampler = TrainSampler(trainset.label, 25, args.way,
                                 args.shot + args.query)
    train_loader = DataLoader(dataset=trainset,
                              batch_sampler=train_sampler,
                              pin_memory=True)

    valset = MyDataset('val', './data20_me/')
    val_sampler = TrainSampler(valset.label, 50, args.way,
                               args.shot + args.query)
    val_loader = DataLoader(dataset=valset,
                            batch_sampler=val_sampler,
                            pin_memory=True)

    model = Protonet().cuda()
Code Example #12
File: MSDN.py Project: dungmn/Perdestrian-
def train():
    THERMAL_PATH = '/storageStudents/K2015/duyld/dungnm/dataset/KAIST/train/images_train_tm/'
    ROOT_DIR = '/storageStudents/K2015/duyld/dungnm/dataset/KAIST/train/images_train'
    IMGS_CSV = 'mydata/imgs_train.csv'
    ROIS_CSV = 'mydata/rois_trainKaist_thr70_MSDN.csv'
    full_transform = transforms.Compose(
        [RandomHorizontalFlip(),
         ToTensor(), my_normalize()])
    # Normalize(rgb_mean,rgb_std)])

    device = torch.device("cuda:0")
    params = {'batch_size': 2, 'shuffle': True, 'num_workers': 24}
    print(params)
    max_epoch = 5
    print('max_epoch', max_epoch)
    LR = 1e-3  #learning rate
    print('learning_rate', LR)
    MT = 0.9  #momentum

    my_dataset = MyDataset(imgs_csv=IMGS_CSV,
                           rois_csv=ROIS_CSV,
                           root_dir=ROOT_DIR,
                           ther_path=THERMAL_PATH,
                           transform=full_transform)
    print(len(my_dataset))
    dataloader = DataLoader(my_dataset, **params)

    MSDN_net = MyMSDN()
    MSDN_net.to(device)

    criterion = nn.MSELoss()
    optimizer = optim.SGD(filter(lambda p: p.requires_grad,
                                 MSDN_net.parameters()),
                          lr=LR,
                          momentum=MT)

    for epoch in range(max_epoch):  # loop over the training set several times
        running_loss = 0.0
        st = time.time()
        for i, sample in enumerate(dataloader):

            label = sample['label']
            bbb = sample['bb']
            gt_rois = sample['gt_roi']

            # label,bbb,gt_rois = label.to(device),bbb.to(device),gt_rois.to(device)

            bbox_label, bbox_targets, bbox_inside_weights, bbox_outside_weights = createTarget(
                label, bbb, gt_rois)

            bbox_label, bbox_targets, bbox_inside_weights, bbox_outside_weights = bbox_label.to(
                device), bbox_targets.to(device), bbox_inside_weights.to(
                    device), bbox_outside_weights.to(device)

            sam = sample['image']

            # bbb = sample['bb']

            num = bbb.size(1)
            bbb = bbb.view(-1, 5)

            ind = torch.arange(params['batch_size'],
                               requires_grad=False).view(-1, 1)
            ind = ind.repeat(1, num).view(-1, 1)
            bbb[:, 0] = ind[:, 0]

            # print(bbb.shape)
            # print(tm.shape)
            sam = sam.to(device)
            bbb = bbb.to(device)

            cls_score, bbox_pred = MSDN_net(sam, bbb)

            RCNN_loss_cls = F.cross_entropy(cls_score, bbox_label)
            # print(RCNN_loss_cls.mean())
            RCNN_loss_bbox = smooth_l1_loss(bbox_pred, bbox_targets,
                                            bbox_inside_weights,
                                            bbox_outside_weights)
            # print(RCNN_loss_bbox.mean())

            loss = RCNN_loss_cls.mean() + RCNN_loss_bbox.mean()
            # print('loss at {}: '.format(i),loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            if i % 10 == 9:  # print every 10 mini-batches
                text = '[{}, {}] loss: {:.3f}  time: {:.3f}'.format(
                    epoch + 1, i + 1, running_loss / 10,
                    time.time() - st)
                print(text)
                with open('models/MSDN/model1/log1.txt', 'a') as f:
                    f.write(text + '\n')
                running_loss = 0.0
                st = time.time()
        torch.save(
            MSDN_net.state_dict(),
            'models/MSDN/model1/model1_lr_1e-3_bz_6_NBS_128_norm_epoch_{}.pth'.
            format(epoch))
    print('Training finished')
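
smooth_l1_loss with inside/outside weights is not defined in the excerpt; the signature matches the Fast/Faster R-CNN helper, which under that assumption looks roughly like:

def smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights,
                   bbox_outside_weights, sigma=1.0):
    # Quadratic for |x| < 1/sigma^2, linear beyond; inside weights mask
    # non-foreground ROIs, outside weights normalize the contribution.
    sigma_2 = sigma ** 2
    diff = bbox_inside_weights * (bbox_pred - bbox_targets)
    abs_diff = diff.abs()
    flag = (abs_diff < 1.0 / sigma_2).float().detach()
    per_elem = (flag * 0.5 * sigma_2 * diff ** 2
                + (1.0 - flag) * (abs_diff - 0.5 / sigma_2))
    return (bbox_outside_weights * per_elem).sum(dim=1)  # one value per ROI; caller takes .mean()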
Code Example #13
import os
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch
from mydataset import MyDataset
import torch.nn as nn
import numpy as np


# the constructor interface is the same as the ImageFolder used before
traindir = '/data2/yujin/cadene_cnn/data/train_val/train'
mydataset = MyDataset(traindir, mode='train')
myloader = torch.utils.data.DataLoader(mydataset, batch_size=4, shuffle=True)


# related to mix_up
def mixup_data(x, y, alpha=1.0, use_cuda=True):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    if alpha > 0:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1

    batch_size = x.size()[0]
    if use_cuda:
        index = torch.randperm(batch_size).cuda()
    else:
        index = torch.randperm(batch_size)

    mixed_x = lam * x + (1 - lam) * x[index, :]
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam
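
The excerpt ends before the loss; in the reference mixup recipe the two targets are combined with the same lam used for the inputs:

def mixup_criterion(criterion, pred, y_a, y_b, lam):
    # Convex combination of the two losses, mirroring the input mixing
    return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)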
Code Example #14
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

# create the output directory
try:
    os.makedirs('results/%s' % args.run_name)
except OSError:
    pass

torch.manual_seed(args.seed)
cudnn.benchmark = True

device = torch.device("cuda" if args.cuda else "cpu")

train_dataset = MyDataset(args.csv_path, args.num_primary_color, mode='train')
train_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=args.batch_size,
    shuffle=True,
    num_workers=args.num_workers,
    worker_init_fn=lambda x: np.random.seed(),  # reseed NumPy per worker so augmentations differ
    drop_last=True,
    pin_memory=True)

val_dataset = MyDataset(args.csv_path, args.num_primary_color, mode='val')
val_loader = torch.utils.data.DataLoader(
    val_dataset,
    batch_size=1,
    shuffle=False,
    num_workers=0)
Code Example #15
import torch.nn.parallel
from torch.autograd import Variable
import torch
import model
from mydataset import MyDataset, ToTensor
from torchvision.transforms import transforms
from torch.utils.data import DataLoader

batchsize = 1
epochsize = 500
learningRate = 0.001
print_step = 50

trainPath = r'data/trainingDigits'

train_dataset = MyDataset(trainPath,
                          transform=transforms.Compose([ToTensor()]))
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batchsize,
                          shuffle=True)

SEED = 0
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
net = model.LeNet().cuda()
criteron = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(),
                             lr=learningRate,
                             weight_decay=0.0001)

scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                            step_size=100,
                                            gamma=0.1)  # gamma assumed (PyTorch default); the excerpt was truncated here
Code Example #16
def main():
    batch_size = 2
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))

    train_db = MyDataset('E:\\ai_learning_resource\\hwdb\\HWDB1\\train', 224, num_clazz=num_clazz, mode='train')
    val_db = MyDataset('E:\\ai_learning_resource\\hwdb\\HWDB1\\train', 224, num_clazz=num_clazz, mode='val')

    train_loader = DataLoader(train_db, batch_size=batch_size, shuffle=True, num_workers=nw)

    val_loader = DataLoader(val_db, batch_size=batch_size, num_workers=nw // 2)

    print("using {} images for training, {} images for validation.".format(len(train_db), len(val_db)))

    # net = GoogLeNet(num_classes=num_clazz, aux_logits=True, init_weights=True)
    net = models.densenet121(pretrained=True, drop_rate=0.6)
    # pretrained weights fix num_classes at 1000, so swap in a fresh classifier head
    net.classifier = nn.Linear(net.classifier.in_features, num_clazz)
    net.to(device)
    optimizer = optim.Adam(net.parameters(), lr=lr)
    criteon = nn.CrossEntropyLoss().to(device)

    start = time.time()
    print('start time: ', start)
    train_steps = len(train_loader)
    # best_acc, best_epoch = 0, 0
    for epoch in range(epochs):
        net.train()
        print('start train')
        x, y = next(iter(train_loader))  # debug: peek at one batch before training
        print(x[0].numpy(), y[0])
        running_loss = 0.0
        #     train_bar = tqdm(train_loader)
        for step, data in enumerate(train_loader):
            print('training')
            x, y = data
            x, y = x.to(device), y.to(device)
            optimizer.zero_grad()
            logits = net(x)  # densenet121 returns a single tensor (aux logits are GoogLeNet-specific)
            loss = criteon(logits, y)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()

            print('Step {}/{}'.format(step, len(train_loader)))

        # train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(epoch + 1, epochs, loss)

        # validate
        net.eval()
        acc = 0.0  # accumulate accurate number / epoch
        with torch.no_grad():
            # val_bar = tqdm(val_loader)
            for val_x, val_y in val_loader:
                val_x, val_y = val_x.to(device), val_y.to(device)
                outputs = net(val_x)  # eval model only have last output layer
                predict_y = torch.max(outputs, dim=1)[1]
                acc += torch.eq(predict_y, val_y).sum().item()

        val_accurate = acc / len(val_db)
        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))

    print('Finished Training')
    print('\n{} epoch cost time {:f}s'.format(epochs, time.time() - start))
Code Example #17
File: rnnlm.py Project: Susie0731/pytorch_learn
def make_word_to_ix(word, word_to_ix):
    # One-hot encoding; the last slot is an out-of-vocabulary bucket.
    # (Function head reconstructed: the excerpt began mid-function.)
    vec = torch.zeros(len(word_to_ix) + 1)
    if word in word_to_ix:
        vec[word_to_ix[word]] = 1
    else:
        vec[len(word_to_ix)] = 1
    return vec


data_words = []
data_labels = []
for i in range(len(sentence_set) - 2):
    word = sentence_set[i]
    label = sentence_set[i + 1]
    data_words.append(make_word_to_ix(word, word_to_ix))
    data_labels.append(make_word_to_ix(label, word_to_ix))

dataset = MyDataset(data_words, data_labels)
train_loader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)

'''
for batch in train_loader:
    print("word_batch------------>\n")
    print(batch[0])
    print("label batch----------->\n")
    print(batch[1])
'''

class RNNModel(nn.Module):

    def __init__(self, embdding_size, hidden_size):
        super(RNNModel, self).__init__()
        self.rnn = nn.RNN(embdding_size, hidden_size, num_layers=1, nonlinearity='relu')
Code Example #18
File: train.py Project: NGccc/lstm_R_C
loss_func_r = nn.L1Loss(reduction='none')  # 'reduce=False' is deprecated
loss_func_c = nn.CrossEntropyLoss(reduction='none')

optimizer = optim.Adam(model.parameters(), lr=lr)
#optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.8)

#train_dataset = MyDataset(train=True, transform=transforms.ToTensor())
#train_loader  = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

#when inference
batch_size = 1
epochs = 1
th = 1

val_dataset = MyDataset(train=False, transform=transforms.ToTensor())
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

#when inference
dloader = [
    val_loader,
]
is_train = 0

#when train
#dloader  = [train_loader, ]
#is_train = 1

tag = ['val', 'train']
for epoch in range(epochs):
    # record: running_loss[1] is train, [0] is val
Code Example #19
torch.cuda.manual_seed(seed)  #gpu
np.random.seed(seed)  #numpy
random.seed(seed)
torch.backends.cudnn.deterministic = True  # cudnn

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--load', default='./save/proto-5-1000/max-acc.pth')
    parser.add_argument('--way', type=int, default=20)
    parser.add_argument('--shot', type=int, default=5)
    parser.add_argument('--query', type=int, default=1)
    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    #### preprocess
    dataset = MyDataset('test', './data20_v2/')
    sampler = TestSampler(dataset.label, 1, args.way, args.shot + args.query)
    loader = DataLoader(dataset,
                        batch_sampler=sampler,
                        num_workers=8,
                        pin_memory=True)

    model = Protonet().cuda()
    model.load_state_dict(torch.load(args.load))
    model.eval()

    data, l = [_.cuda() for _ in next(iter(loader))]
    k = args.way * args.shot
    data_shot = data[:k]

    x = model(data_shot)
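
The excerpt stops right after embedding the support set. The usual Prototypical Networks step that follows averages the shot embeddings per class and classifies queries by distance; a sketch under that assumption:

    proto = x.reshape(args.shot, args.way, -1).mean(dim=0)  # one prototype per class
    query = model(data[k:])                                 # embed the query points
    logits = -torch.cdist(query, proto)                     # nearest-prototype scores
    pred = logits.argmax(dim=1)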
Code Example #20
File: train.py Project: zghzdxs/segmentation
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            print("%d/%d,train_loss:%0.3f" %
                  (step,
                   (dt_size - 1) // dataload.batch_size + 1, loss.item()))
        print("epoch %d loss:%0.3f" % (epoch, epoch_loss / step))
    torch.save(model.state_dict(), 'weights_%d.pth' % epoch)
    return model


# train the model
model = Unet(3, 1).to(device)
batch_size = 1
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters())

dataset = MyDataset(image_path,
                    label_path,
                    train_data,
                    transform=x_transforms,
                    target_transform=y_transforms)
data_loader = data.DataLoader(dataset,
                              batch_size=1,
                              shuffle=True,
                              num_workers=0)

train_model(model, criterion, optimizer, data_loader)
Code Example #21
File: train.py Project: flowerrin/Transformer
def trainIters(transformer, n_iters, print_every, learning_rate=0):
    start = time.time()
    print_loss_total = 0  # Reset every print_every
    
    #transformer_optimizer = optim.SGD(transformer.parameters(), lr=learning_rate)
    transformer_optimizer = optim.Adam(transformer.parameters(), lr=learning_rate,
                                       weight_decay=1e-06, betas=(0.9, 0.98), eps=1e-09)
    criterion = LabelSmoothing(PAD_token, 0.1)
    #criterion = nn.NLLLoss(ignore_index=PAD_token, reduction='sum')
    
    global max_epoch
    BLEUs = {}
    all_step_cnt = 1
    
    inputs, outputs = map(list, zip(*pairs))
    training_pairs = [tensorsFromPair(inputs[i].split(" "), outputs[i].split(" "))
                      for i in range(text_size)]
    
    dataset = MyDataset(training_pairs)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=0, collate_fn=my_collate_fn)
    #num_workers=3
    
    for epoch in range(1, max_epoch+1):
        start_epo = time.time()
        print_loss_avg_total = 0
        iter = 1
        
        transformer.train()
        
        # idx : [inputs, targets], [ip_len, op_len]
        # 1iter : [batch_size, length]
        for idx in dataloader:
            batch = len(idx[0][0])
            training_pair = idx[0]
            sq_lengths = idx[1]
            
            input_tensor = torch.tensor(training_pair[0], dtype=torch.long, device=device)
            target_tensor = torch.tensor(training_pair[1], dtype=torch.long, device=device)
            
            loss = train(input_tensor, target_tensor, sq_lengths,
                         transformer, transformer_optimizer, criterion, all_step_cnt, batch)
            print_loss_total += loss
            
            if iter % print_every == 0:
                print_loss_avg = print_loss_total / print_every
                print_loss_avg_total += print_loss_avg
                print_loss_total = 0
                print('%s (%d %d%%) %.4f' % (timeSince(start, start_epo, iter / n_iters),
                                             iter, iter / n_iters * 100, print_loss_avg))
            
            all_step_cnt += 1
            iter += 1
            
        print("Epoch %d Loss: %.4f" % (epoch, print_loss_avg_total/10))
        
        model_name = "./models/seq2seq_model_v{}.pt".format(epoch)
        torch.save({
            "transformer_model": transformer.state_dict(),
        }, model_name)
        
        
        all_BLEUscore = Validation(transformer, len(pairs_dev), epoch-1)
        print("Epoch %d BLEU : %.4f" % (epoch, all_BLEUscore))
        
        # Early stopping: from epoch 6 on, if the average of the kept BLEU scores
        # already matches or beats this epoch's score, roll back to the best epoch
        # and stop; otherwise evict the worst kept score and continue.
        if epoch >= 6:
            BLEU_list = list(BLEUs.values())
            if (sum(BLEU_list) / len(BLEU_list)) >= all_BLEUscore:
                max_BLEU_epoch = max(BLEUs, key=BLEUs.get)
                print("Max Epoch:", max_BLEU_epoch)
                max_epoch = max_BLEU_epoch
                
                model_name = "./models/seq2seq_model.pt"
                torch.save({
                    "transformer_model": transformer.state_dict(),
                }, model_name)
                break
            else:
                min_BLEU_epoch = min(BLEUs, key=BLEUs.get)
                del BLEUs[min_BLEU_epoch]
        BLEUs[epoch] = all_BLEUscore
Code Example #22
    epsilon = torch.from_numpy(  # wrapper reconstructed: the excerpt began mid-statement
        np.random.uniform(low=0.0, high=1.0, size=x.shape[0]))
    epsilon = epsilon.type(torch.FloatTensor)
    return 2 * x + 8 + epsilon.unsqueeze(1)


X, Y = gen_data(N_DATA + N_Val)
XTrain, YTrain = X[:N_DATA], Y[:N_DATA]
XTest, YTest = X[N_DATA:], Y[N_DATA:]

XTrain = Variable(torch.FloatTensor(XTrain))
YTrain = Variable(torch.FloatTensor(YTrain))

XTest = Variable(torch.FloatTensor(XTest))
YTest = Variable(torch.FloatTensor(YTest))

train_set = MyDataset(x=XTrain, y=YTrain)
train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)

model = MyNet(n_in=1, n_out=1).to(device)

model.eval()
YPRED = model(XTest)  # calling the module invokes forward()

criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=INITIAL_LEARNING_RATE)
scheduler = StepLR(optimizer, 20)

for epoch in range(N_EPOCHS):
    model.train()
    epoch_loss = 0
    for i, (x, y) in enumerate(train_loader):
Code Example #23
File: MSDN.py Project: nguyendu394/Perdestrian-
def train(pretrain):
    print('TRAINING MSDN...')

    full_transform=transforms.Compose([RandomHorizontalFlip(),
                                       ToTensor(),
                                       Normalize(cfg.BGR_MEAN,cfg.BGR_STD)])

    params = {'batch_size': cfg.TRAIN.BATCH_SIZE,
              'shuffle': cfg.TRAIN.SHUFFLE,
              'num_workers': cfg.TRAIN.NUM_WORKERS}
    print(params)
    max_epoch = cfg.TRAIN.MAX_EPOCH
    print('max_epoch',max_epoch)
    LR = cfg.TRAIN.LEARNING_RATE #learning rate
    print('learning_rate',LR)
    MT = cfg.TRAIN.MOMENTUM #momentum
    W_DECAY = cfg.TRAIN.WEIGHT_DECAY
    print('weight decay',W_DECAY)

    my_dataset = MyDataset(imgs_csv=cfg.TRAIN.IMGS_CSV, rois_csv=cfg.TRAIN.ROIS_CSV,
                           root_dir=cfg.TRAIN.ROOT_DIR, ther_path=cfg.TRAIN.THERMAL_PATH,
                           transform=full_transform)
    print(len(my_dataset))
    dataloader = DataLoader(my_dataset, **params)

    MSDN_net = MyMSDN()
    MSDN_net.to(cfg.DEVICE)

    # load pretrained model
    if pretrain is not None:
        print('pretrain: ' + pretrain)
        MSDN_net.load_state_dict(torch.load(pretrain))

    criterion = nn.MSELoss()
    optimizer = optim.SGD(filter(lambda p: p.requires_grad,MSDN_net.parameters()), lr=LR, momentum=MT,weight_decay=W_DECAY)

    f = open('models/MSDN/model12/log12.txt','a')
    for epoch in range(max_epoch):  # loop over the training set several times
        running_loss = 0.0
        st = time.time()
        for i, sample in enumerate(dataloader):

            label = sample['label']
            bbb = sample['bb']
            gt_rois = sample['gt_roi']
            # label,bbb,gt_rois = label.to(cfg.DEVICE),bbb.to(cfg.DEVICE),gt_rois.to(cfg.DEVICE)

            bbox_label,bbox_targets,bbox_inside_weights,bbox_outside_weights = createTarget(label,bbb,gt_rois)

            bbox_label,bbox_targets,bbox_inside_weights,bbox_outside_weights = bbox_label.to(cfg.DEVICE),bbox_targets.to(cfg.DEVICE),bbox_inside_weights.to(cfg.DEVICE),bbox_outside_weights.to(cfg.DEVICE)

            sam = sample['image']

            num=bbb.size(1)
            bbb=bbb.view(-1, 5)

            ind = torch.arange(params['batch_size'],requires_grad=False).view(-1,1)
            ind = ind.repeat(1,num).view(-1,1)
            bbb[:,0] = ind[:,0]

            sam = sam.to(cfg.DEVICE)
            bbb = bbb.to(cfg.DEVICE)

            cls_score, bbox_pred = MSDN_net(sam,bbb)

            RCNN_loss_cls = F.cross_entropy(cls_score, bbox_label)

            RCNN_loss_bbox = smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights)
            # print(RCNN_loss_bbox.mean())

            loss = RCNN_loss_cls.mean() + RCNN_loss_bbox.mean()
            # print('loss at {}: '.format(i),loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            if i % 10 == 9:    # print every 10 mini-batches
                text = '[{}, {}] loss: {:.3f}  time: {:.3f}'.format(epoch + 1, i + 1, running_loss / 10,time.time()-st)
                print(text)
                #write txt file
                f.write(text + '\n')
                running_loss = 0.0
                st = time.time()
        name_model = 'models/MSDN/model12/model12_lr_1e-4_bz_2_decay_epoch_{}.pth'.format(epoch)
        torch.save(MSDN_net.state_dict(), name_model)
    f.close()
    print('model saved: ' + name_model)
    print('Training finished')
Code Example #24
def train():
    THERMAL_PATH = '/storageStudents/K2015/duyld/dungnm/dataset/KAIST/train/images_train_tm/'
    ROOT_DIR = '/storageStudents/K2015/duyld/dungnm/dataset/KAIST/train/images_train'
    IMGS_CSV = 'mydata/imgs_train.csv'
    ROIS_CSV = 'mydata/rois_trainKaist_thr70_1.csv'

    params = {'batch_size': 6, 'shuffle': True, 'num_workers': 12}
    print(params)
    max_epoch = 10
    print('max_epoch', max_epoch)
    LR = 1e-6  #learning rate
    print('learning_rate', LR)
    MT = 0.9  #momentum

    device = torch.device("cuda:0")
    # cudnn.benchmark = True
    # transform = ToTensor()
    full_transform = transforms.Compose(
        [RandomHorizontalFlip(),
         ToTensor(), my_normalize()])
    # Normalize(rgb_mean,rgb_std)])

    my_dataset = MyDataset(imgs_csv=IMGS_CSV,
                           rois_csv=ROIS_CSV,
                           root_dir=ROOT_DIR,
                           ther_path=THERMAL_PATH,
                           transform=full_transform)

    dataloader = DataLoader(my_dataset, **params)

    RRN_net = MyRRN()
    RRN_net.to(device)

    RRN_net.load_state_dict(
        torch.load(
            'models/model24/model24_lr_1e-6_bz_6_NBS_128_norm_epoch_9.pth'))
    criterion = nn.MSELoss()
    optimizer = optim.SGD(filter(lambda p: p.requires_grad,
                                 RRN_net.parameters()),
                          lr=LR,
                          momentum=MT)

    for epoch in range(max_epoch):  # loop over the training set several times
        running_loss = 0.0
        st = time.time()
        for i, data in enumerate(dataloader):
            # get the data
            sample = data
            sam = sample['image']
            bbb = sample['bb']
            num = bbb.size(1)

            bbb = bbb.view(-1, 5)

            #reset id
            # bbb[:, 0] = bbb[:, 0] - bbb[0, 0]
            ind = torch.arange(params['batch_size'],
                               requires_grad=False).view(-1, 1)
            ind = ind.repeat(1, num).view(-1, 1)
            bbb[:, 0] = ind[:, 0]

            tm = sample['tm']
            # print(bbb.shape)
            # print(tm.shape)
            sam, bbb, tm = sam.to(device), bbb.to(device), tm.to(device)

            # roi_pool = ROIPool((50, 50), 1/1)

            # labels_output = roi_pool(tm,bbb)
            labels_output = resizeThermal(tm, bbb)
            labels_output = labels_output.to(device)

            # zero the gradients
            optimizer.zero_grad()

            # compute the prediction, backpropagate, and update the weights with the optimizer
            out_RRN = RRN_net(sam, bbb)

            loss = criterion(out_RRN, labels_output)
            # print('loss at {}: '.format(i),loss.item())
            loss.backward()
            optimizer.step()

            # print statistics during training
            running_loss += loss.item()
            if i % 10 == 9:  # print every 10 mini-batches
                text = '[{}, {}] loss: {:.3f}  time: {:.3f}'.format(
                    epoch + 1, i + 1, running_loss / 10,
                    time.time() - st)
                print(text)
                with open('models/model26/log26.txt', 'a') as f:
                    f.write(text + '\n')
                running_loss = 0.0
                st = time.time()
        torch.save(
            RRN_net.state_dict(),
            'models/model26/model26_lr_1e-6_bz_6_NBS_128_norm_epoch_{}.pth'.
            format(epoch))
    print('Training finished')
Code Example #25
File: inference.py Project: formatechnologies/FSCS
#                 manual_color_2,
#                 manual_color_3,
#                 manual_color_4,
#                 manual_color_5,
#             ]
#         )
#         / 255
#     )
manual_colors = model.cluster_centers_ / 255

try:
    os.makedirs("results/%s/%s" % (run_name, img_name))
except OSError:
    pass

test_dataset = MyDataset(csv_path, num_primary_color, mode="test")
test_loader = torch.utils.data.DataLoader(
    test_dataset,
    batch_size=1,
    shuffle=False,
    num_workers=0,
)

device = "cuda"

# define model
mask_generator = MaskGenerator(num_primary_color).to(device)
residue_predictor = ResiduePredictor(num_primary_color).to(device)

# load params
path_mask_generator = "results/" + run_name + "/mask_generator.pth"
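
The excerpt is cut off here; presumably the saved parameters are then loaded into the two networks, along these lines (a sketch following the path pattern above):

path_residue_predictor = "results/" + run_name + "/residue_predictor.pth"
mask_generator.load_state_dict(torch.load(path_mask_generator))
residue_predictor.load_state_dict(torch.load(path_residue_predictor))
mask_generator.eval()
residue_predictor.eval()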
Code Example #26
def main():
    x_train, val_data, x_label, val_label = LoadData(sys.argv[1])
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)

    data_transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomResizedCrop(44),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0], std=[255])
    ])

    val_transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.CenterCrop(44),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0], std=[255])
    ])

    train_set = MyDataset(x_train, x_label, data_transform)
    val_set = MyDataset(val_data, val_label, val_transform)

    batch_size = 128
    train_loader = DataLoader(train_set,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=8)
    val_loader = DataLoader(val_set,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=8)

    model = Classifier().to(device)
    model.initialize_weights()
    print(model.eval())

    loss = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    scheduler = ReduceLROnPlateau(optimizer, 'min')

    best_acc = 0.0
    num_epoch = 1000

    for epoch in range(num_epoch):
        adjust_learning_rate(optimizer, epoch)

        epoch_start_time = time.time()
        train_acc = 0.0
        train_loss = 0.0
        val_acc = 0.0
        val_loss = 0.0

        model.train()
        for i, data in enumerate(train_loader):
            optimizer.zero_grad()
            train_pred = model(data[0].to(device))
            batch_loss = loss(train_pred, data[1].to(device))
            batch_loss.backward()
            optimizer.step()

            train_acc += np.sum(
                np.argmax(train_pred.cpu().data.numpy(), axis=1) ==
                data[1].numpy())
            train_loss += batch_loss.item()

            progress = ('#' * int(float(i) / len(train_loader) * 40)).ljust(40)
            print ('[%03d/%03d] %2.2f sec(s) | %s |' % (epoch+1, num_epoch, \
                    (time.time() - epoch_start_time), progress), end='\r', flush=True)

        model.eval()
        with torch.no_grad():  # validation: no gradient tracking needed
            for i, data in enumerate(val_loader):
                val_pred = model(data[0].to(device))
                batch_loss = loss(val_pred, data[1].to(device))

                val_acc += np.sum(
                    np.argmax(val_pred.cpu().data.numpy(), axis=1) ==
                    data[1].numpy())
                val_loss += batch_loss.item()

                progress = ('#' * int(float(i) / len(val_loader) * 40)).ljust(40)
                print('[%03d/%03d] %2.2f sec(s) | %s |' % (epoch+1, num_epoch,
                      (time.time() - epoch_start_time), progress), end='\r', flush=True)

        val_acc = val_acc / len(val_set)
        print('[%03d/%03d] %2.2f sec(s) Train Acc: %3.6f Loss: %3.6f | Val Acc: %3.6f loss: %3.6f' % \
                (epoch + 1, num_epoch, time.time()-epoch_start_time, \
                train_acc/len(train_set), train_loss, val_acc, val_loss))

        # scheduler.step(val_loss)
        if (val_acc > best_acc):
            best_acc = val_acc

            if not os.path.exists("static_dict/"):
                os.system('mkdir static_dict/')

            saving_compression(model)
            os.system("du static_dict/ --apparent-size --bytes --max-depth=0")
            with open('acc.txt', 'w') as f:
                f.write(str(epoch) + '\t' + str(val_acc) + '\t')


#             torch.save(model.state_dict(), 'save/model.pth')
            print('Model Saved!')
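
adjust_learning_rate is not shown in the excerpt; a common step-decay implementation it could correspond to (schedule values are assumptions):

def adjust_learning_rate(optimizer, epoch, base_lr=0.001, decay=0.5, every=30):
    # Halve the learning rate every `every` epochs
    lr = base_lr * (decay ** (epoch // every))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr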
Code Example #27
def test():
    TEST_THERMAL_PATH = '/storageStudents/K2015/duyld/dungnm/dataset/KAIST/test/images_test_tm/'
    TEST_ROOT_DIR = '/storageStudents/K2015/duyld/dungnm/dataset/KAIST/test/images_test'
    IMGS_CSV = 'mydata/imgs_test.csv'
    ROIS_CSV = 'mydata/rois_testKaist_thr70_0.csv'

    params = {'batch_size': 2, 'shuffle': True, 'num_workers': 24}

    device = torch.device("cuda:0")
    full_transform = transforms.Compose([
        RandomHorizontalFlip(),
        ToTensor(),
    ])
    # Normalize(rgb_mean,rgb_std)])

    my_dataset = MyDataset(imgs_csv=IMGS_CSV,
                           rois_csv=ROIS_CSV,
                           root_dir=TEST_ROOT_DIR,
                           ther_path=TEST_THERMAL_PATH,
                           transform=full_transform)
    print(len(my_dataset))
    dataloader = DataLoader(my_dataset, **params)

    RRN_net = MyRRN()
    RRN_net.to(device)
    RRN_net.load_state_dict(
        torch.load(
            'models/model21/model21_lr_1e-7_bz_6_NBS_128_data_True_epoch_7.ptx'
        ))

    st = time.time()
    running_loss = 0.0
    total_loss = []
    criterion = nn.MSELoss()

    for i, sample in enumerate(dataloader):
        # get the data
        sam = sample['image']
        bbb = sample['bb']
        num = bbb.size(1)
        bbb = bbb.view(-1, 5)
        #reset id
        # bbb[:, 0] = bbb[:, 0] - bbb[0, 0]

        ind = torch.arange(params['batch_size'],
                           requires_grad=False).view(-1, 1)
        ind = ind.repeat(1, num).view(-1, 1)
        bbb[:, 0] = ind[:, 0]

        tm = sample['tm']
        # print(bbb.shape)
        # print(tm.shape)
        sam, bbb, tm = sam.to(device), bbb.to(device), tm.to(device)

        # roi_pool = ROIPool((50, 50), 1/1)

        # labels_output = roi_pool(tm,bbb)
        labels_output = resizeThermal(tm, bbb)
        labels_output = labels_output.to(device)
        # print('label shape',labels_output.shape)

        # forward pass through the network (no weight update at test time)
        out_RRN = RRN_net(sam, bbb)

        loss = criterion(out_RRN, labels_output)

        # print running statistics
        running_loss += loss.item()
        if i % 10 == 9:  # print every 10 mini-batches
            text = '[{}, {}] loss: {:.3f}  time: {:.3f}'.format(
                0 + 1, i + 1, running_loss / 10,
                time.time() - st)
            print(text)
            with open('test2_model21_epoch7.txt', 'a') as f:
                f.write(text + '\n')
            total_loss.append(running_loss)
            running_loss = 0.0
            st = time.time()
    print("TOTAL: ", sum(total_loss) / len(total_loss))
Code Example #28
from torch import optim, nn
from torch.utils.data import DataLoader

from mydataset import MyDataset
from model import GoogLeNet

batchsz = 20
lr = 1e-3
epochs = 10
num_clazz = 200

device = torch.device('cuda')

root = "E:\\ai_learning_resource\\hwdb\\HWDB1\\train"
root_ = "E:\\ai_learning_resource\\hwdb\\HWDB1\\test"
train_db = MyDataset(root, 224, num_clazz, mode='train')
val_db = MyDataset(root, 224, num_clazz, mode='val')
test_db = MyDataset(root_, 224, num_clazz, mode='test')

train_loader = DataLoader(train_db,
                          batch_size=batchsz,
                          shuffle=True,
                          num_workers=8)
val_loader = DataLoader(val_db, batch_size=batchsz, num_workers=8)
test_loader = DataLoader(test_db, batch_size=batchsz, num_workers=8)


def evalute(model, loader):
    correct = 0
    total = len(loader.dataset)
    for x, y in loader:  # loop body completed: the excerpt was truncated here
        x, y = x.to(device), y.to(device)
        with torch.no_grad():
            correct += torch.eq(model(x).argmax(dim=1), y).sum().item()
    return correct / total
Code Example #29
def main():
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection Training")
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "--save_dir",
        default="./data",
        help="path to save dataset",
        type=str,
    )
    parser.add_argument(
        "-d",
        "--data_name",
        default=['icpr'],
        help="path to save dataset",
        type=str,
        nargs='+',
    )
    parser.add_argument("--is_training",
                        '-t',
                        default=False,
                        help="training or evaluation",
                        action='store_true')
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    parser.add_argument(
        "--ckpt",
        default=None,
        help="path to save dataset",
        type=str,
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()
    args.num_layers = 2
    args.featrue_layers = 512
    args.hidden_dim = 512
    args.vocab_size = 36
    args.out_seq_len = 30
    args.hidden_dim_de = 512
    args.embedding_size = 512
    args.batch_size = 32
    args.multi_bias = True
    ######
    args.lr = 0.02
    args.checkpoint_dir = './model/'  # adam_lowlr/
    args.optim = 'sgd'
    os.environ["CUDA_VISIBLE_DEVICES"] = str(0)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    args.device = 'cuda'
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)

    # settings for multi-gpu training not implement yet
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1
    args.distributed = False  # not implement
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
    else:
        pass

    # trainsform for train and test
    # TODO: data augmentation could live in the Dataset class or in the transform
    if args.is_training:
        transform = transforms.Compose([
            transforms.Resize((48, 160)),  # (h, w)   48, 160   6 40
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])
    else:
        transform = transforms.Compose([
            transforms.Resize((48, 160)),  # 32 280
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])

    # prepare for data
    # trainset = prepare_data(args.save_dir, args.data_name, args.is_training)
    if args.is_training:
        # trainset = MyDataset('./data/icpr/crop/',
        #                  './data/icpr/char2num.txt', transform)
        # ./2423/6/96_Flowerpots_29746.jpg flowerpots
        trainset = MyDataset([
            '90kDICT32px_train',
        ], transform)
        sys.stdout = Logger(args.checkpoint_dir + '/log.txt')
        train(args, args.local_rank, args.distributed, trainset)
    else:
        # testset= MyDataset('/data4/ydb/dataset/recognition/imgs2_east_regions', transform=transform)
        # testset = MyDataset('./data/icpr/crop/',
        #                  './data/icpr/char2num.txt', transform)
        testset = MyDataset('90kDICT32px_val', transform)
        test(args, args.local_rank, args.distributed, testset)
Code Example #30
File: RRN.py Project: nguyendu394/Perdestrian-
def train():
    params = {
        'batch_size': cfg.TRAIN.BATCH_SIZE,
        'shuffle': cfg.TRAIN.SHUFFLE,
        'num_workers': cfg.TRAIN.NUM_WORKERS
    }
    print(params)
    max_epoch = cfg.TRAIN.MAX_EPOCH
    print('max_epoch', max_epoch)
    LR = cfg.TRAIN.LEARNING_RATE  #learning rate
    print('learning_rate', LR)
    MT = cfg.TRAIN.MOMENTUM

    # cudnn.benchmark = True
    # transform = ToTensor()
    full_transform = transforms.Compose([
        RandomHorizontalFlip(),
        ToTensor(),
        Normalize(cfg.BGR_MEAN, cfg.BGR_STD)
    ])

    my_dataset = MyDataset(imgs_csv=cfg.TRAIN.IMGS_CSV,
                           rois_csv=cfg.TRAIN.ROIS_CSV,
                           root_dir=cfg.TRAIN.ROOT_DIR,
                           ther_path=cfg.TRAIN.THERMAL_PATH,
                           transform=full_transform)

    dataloader = DataLoader(my_dataset, **params)

    RRN_net = MyRRN()
    RRN_net.to(cfg.DEVICE)

    RRN_net.load_state_dict(
        torch.load(
            'models/RRN/model24/model24_lr_1e-6_bz_6_NBS_128_norm_epoch_9.pth')
    )
    criterion = nn.MSELoss()
    optimizer = optim.SGD(filter(lambda p: p.requires_grad,
                                 RRN_net.parameters()),
                          lr=LR,
                          momentum=MT)

    f = open('models/RRN/model27/log27.txt', 'a')
    for epoch in range(max_epoch):  # loop over the training set several times
        running_loss = 0.0
        st = time.time()
        for i, data in enumerate(dataloader):
            # get the data
            sample = data
            sam = sample['image']
            bbb = sample['bb']
            num = bbb.size(1)

            bbb = bbb.view(-1, 5)

            #reset id
            # bbb[:, 0] = bbb[:, 0] - bbb[0, 0]
            ind = torch.arange(params['batch_size'],
                               requires_grad=False).view(-1, 1)
            ind = ind.repeat(1, num).view(-1, 1)
            bbb[:, 0] = ind[:, 0]

            tm = sample['tm']

            sam, bbb, tm = sam.to(cfg.DEVICE), bbb.to(cfg.DEVICE), tm.to(
                cfg.DEVICE)

            # labels_output = roi_pool(tm,bbb)
            labels_output = resizeThermal(tm, bbb)
            labels_output = labels_output.to(cfg.DEVICE)

            # zero the gradients
            optimizer.zero_grad()

            # compute the prediction, backpropagate, and update the weights with the optimizer
            out_RRN = RRN_net(sam, bbb)

            loss = criterion(out_RRN, labels_output)
            # print('loss at {}: '.format(i),loss.item())
            loss.backward()
            optimizer.step()

            # print statistics during training
            running_loss += loss.item()
            if i % 10 == 9:  # print every 10 mini-batches
                text = '[{}, {}] loss: {:.3f}  time: {:.3f}'.format(
                    epoch + 1, i + 1, running_loss / 10,
                    time.time() - st)
                print(text)

                f.write(text + '\n')
                running_loss = 0.0
                st = time.time()
        torch.save(
            RRN_net.state_dict(),
            'models/RRN/model27/model27_lr_1e-6_bz_6_NBS_128_norm_epoch_{}.pth'
            .format(epoch))
    f.close()
    print('Training finished')