Example No. 1
def run(train_sets, valid_sets, idx, save_dr):
    batch_size = 8
    imagenet_data = ImageFolder(train_sets, transform=data_transforms['train'])
    test_data = ImageFolder(valid_sets, transform=data_transforms['val'])
    data_loader = DataLoader(imagenet_data,
                             batch_size=batch_size,
                             shuffle=True)
    test_data_loader = DataLoader(test_data, batch_size=1, shuffle=True)

    cls_num = len(imagenet_data.class_to_idx)
    model = inceptionresnetv2(num_classes=1001, pretrained=None)
    model.load_state_dict(
        torch.load('/home/dsl/all_check/inceptionresnetv2-520b38e4.pth'),
        strict=True)
    model.last_linear = nn.Linear(1536, cls_num)
    model.cuda()
    state = {'learning_rate': 0.01, 'momentum': 0.9, 'decay': 0.0005}
    #optimizer = torch.optim.SGD(model.parameters(), state['learning_rate'], momentum=state['momentum'],
    #weight_decay=state['decay'], nesterov=True)

    optimizer = torch.optim.Adam(model.parameters(),
                                 state['learning_rate'],
                                 weight_decay=state['decay'],
                                 amsgrad=True)

    state['label_ix'] = imagenet_data.class_to_idx
    state['cls_name'] = idx

    state['best_accuracy'] = 0
    sch = lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
                                         mode='max',
                                         factor=0.9,
                                         patience=3)

    focal_loss = FocalLoss(gamma=2)
    focal_loss.cuda()

    def train():
        model.train()
        loss_avg = 0.0
        progress = ProgressBar()
        ip1_loader = []
        idx_loader = []
        correct = 0
        for (data, target) in progress(data_loader):
            data, target = torch.autograd.Variable(
                data.cuda()), torch.autograd.Variable(target.cuda())
            output = model(data)
            pred = output.data.max(1)[1]
            correct += float(pred.eq(target.data).sum())
            optimizer.zero_grad()
            loss = focal_loss(output, target)
            loss.backward()
            optimizer.step()
            # running average of the loss, weighted toward the current batch
            loss_avg = loss_avg * 0.2 + float(loss) * 0.8
            print(correct, len(data_loader.dataset), loss_avg)
        state['train_accuracy'] = correct / len(data_loader.dataset)
        state['train_loss'] = loss_avg

    def test():
        with torch.no_grad():
            model.eval()
            loss_avg = 0.0
            correct = 0
            for (data, target) in test_data_loader:

                data, target = torch.autograd.Variable(
                    data.cuda()), torch.autograd.Variable(target.cuda())
                output = model(data)
                loss = F.cross_entropy(output, target)
                pred = output.data.max(1)[1]
                correct += float(pred.eq(target.data).sum())
                loss_avg += float(loss)
                state['test_loss'] = loss_avg / len(test_data_loader.dataset)
                state['test_accuracy'] = correct / len(
                    test_data_loader.dataset)
            print(state['test_accuracy'])

    best_accuracy = 0.0
    for epoch in range(40):
        state['epoch'] = epoch
        train()
        test()
        sch.step(state['train_accuracy'])
        best_accuracy = (state['train_accuracy'] + state['test_accuracy']) / 2

        if best_accuracy > state['best_accuracy']:
            state['best_accuracy'] = best_accuracy
            torch.save(model.state_dict(), os.path.join(save_dr, idx + '.pth'))
            with open(os.path.join(save_dr, idx + '.json'), 'w') as f:
                f.write(json.dumps(state))
                f.flush()
        print(state)
        print("Best accuracy: %f" % state['best_accuracy'])

        if state['test_accuracy'] == 1 and epoch > 10:
            break
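Note: the FocalLoss module used throughout these examples is not part of the
listing. A minimal multi-class sketch consistent with the FocalLoss(gamma=2)
call sites above (raw logits plus integer class targets), following the
standard focal-loss formulation, might look like this:

import torch
import torch.nn as nn
import torch.nn.functional as F

class FocalLoss(nn.Module):
    """Multi-class focal loss: FL(p_t) = -(1 - p_t)^gamma * log(p_t)."""

    def __init__(self, gamma=2):
        super(FocalLoss, self).__init__()
        self.gamma = gamma

    def forward(self, logits, target):
        # log-probability of the true class for each sample
        logp = F.log_softmax(logits, dim=1)
        logp_t = logp.gather(1, target.unsqueeze(1)).squeeze(1)
        p_t = logp_t.exp()
        # down-weight well-classified examples by (1 - p_t)^gamma
        loss = -((1 - p_t) ** self.gamma) * logp_t
        return loss.mean()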
Example No. 2
def run(trainr, testdr, name, cls_num, idx):

    imagenet_data = ImageFolder(trainr, transform=data_transforms['train'])
    test_data = ImageFolder(testdr, transform=data_transforms['val'])
    data_loader = DataLoader(imagenet_data, batch_size=8, shuffle=True)
    test_data_loader = DataLoader(test_data, batch_size=1, shuffle=True)
    model = se_resnext50_32x4d(num_classes=1000, pretrained=None)
    model.load_state_dict(
        torch.load('/home/dsl/all_check/se_resnext50_32x4d-a260b3a4.pth'),
        strict=False)
    model.last_linear = nn.Linear(2048, cls_num)
    #model.fc2 = nn.Linear(4, cls_num)
    #model.load_state_dict(torch.load('log/1006_iv_other.pth'), strict=False)
    model.cuda()
    state = {'learning_rate': 0.01, 'momentum': 0.9, 'decay': 0.0005}
    optimizer = torch.optim.SGD(model.parameters(),
                                state['learning_rate'],
                                momentum=state['momentum'],
                                weight_decay=state['decay'],
                                nesterov=True)
    state['label_ix'] = imagenet_data.class_to_idx
    state['cls_name'] = name

    state['best_accuracy'] = 0
    sch = lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
                                         mode='max',
                                         factor=0.5,
                                         patience=5)
    ll = len(data_loader.dataset)
    focal_loss = FocalLoss(gamma=2)
    focal_loss.cuda()

    def train():
        model.train()
        loss_avg = 0.0
        progress = ProgressBar()
        ip1_loader = []
        idx_loader = []
        correct = 0
        for (data, target) in progress(data_loader):
            # stop at the last full batch so the batch size stays fixed
            if data.size(0) != 8:
                break
            data, target = torch.autograd.Variable(
                data.cuda()), torch.autograd.Variable(target.cuda())
            output = model(data)
            pred = output.data.max(1)[1]
            correct += float(pred.eq(target.data).sum())

            optimizer.zero_grad()

            loss = focal_loss(output, target)
            loss.backward()
            optimizer.step()
            loss_avg = loss_avg * 0.2 + float(loss) * 0.8
            print(correct, ll, loss_avg)

        state['train_accuracy'] = correct / len(data_loader.dataset)
        state['train_loss'] = loss_avg

    def test():
        with torch.no_grad():
            model.eval()
            loss_avg = 0.0
            correct = 0
            for batch_idx, (data, target) in enumerate(test_data_loader):
                data, target = torch.autograd.Variable(
                    data.cuda()), torch.autograd.Variable(target.cuda())
                output = model(data)
                loss = F.cross_entropy(output, target)
                pred = output.data.max(1)[1]
                correct += float(pred.eq(target.data).sum())
                loss_avg += float(loss)
                state['test_loss'] = loss_avg / len(test_data_loader)
                state['test_accuracy'] = correct / len(
                    test_data_loader.dataset)
            print(state['test_accuracy'])

    best_accuracy = 0.0
    for epoch in range(100):
        state['epoch'] = epoch
        train()
        test()
        sch.step(state['train_accuracy'])

        if best_accuracy < state['test_accuracy']:
            best_accuracy = state['test_accuracy']
            state['best_accuracy'] = best_accuracy
            torch.save(
                model.state_dict(),
                os.path.join(
                    '/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/dsl/check',
                    idx + '.pth'))
        with open(
                os.path.join(
                    '/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/dsl/check',
                    idx + '.json'), 'w') as f:
            f.write(json.dumps(state))
            f.flush()
        print(state)
        print("Best accuracy: %f" % state['best_accuracy'])
        if best_accuracy == 1.0:
            break
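Note: the data_transforms dictionary these run() functions index is not shown
either. A plausible minimal definition, assuming standard ImageNet-style
preprocessing (the crop sizes and augmentations here are assumptions):

from torchvision import transforms

data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(299),  # input size is an assumption
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ]),
    'val': transforms.Compose([
        transforms.Resize(320),
        transforms.CenterCrop(299),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ]),
}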
Example No. 3
def train(batch_size=2, learning_rate=1e-2, train_epoch=100):
    # Normalizer(), Augmenter(), Resizer() are applied in this order
    transform = transforms.Compose([Normalizer(), Augmenter(), Resizer()])
    dataset = CocoDataset('./data/coco/', 'train2017', transform)
    data_loader = Data.DataLoader(dataset, batch_size, num_workers=2,
                                  shuffle=True, collate_fn=collater,
                                  pin_memory=True)
    dataset_size = len(dataset)
    print('sample number:', dataset_size)
    print('epoch size:', dataset_size / batch_size)

    retinanet = RetinaNet()
    anchor = Anchor()
    focal_loss = FocalLoss()

    if cuda:
        retinanet = torch.nn.DataParallel(retinanet).cuda()
        anchor = anchor.cuda()
        focal_loss = focal_loss.cuda()
    retinanet.module.freeze_bn()

    optimizer = torch.optim.SGD(retinanet.parameters(),
                                lr=learning_rate,
                                momentum=0.9,
                                weight_decay=1e-4)
    '''
    class torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
        factor=0.1, patience=10, verbose=False, threshold=0.0001,
        threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08)
    :param optimizer: the optimizer whose learning rate will be adjusted
    :param mode: (str) 'min' or 'max'; 'min' reduces the LR when the monitored
                 quantity stops decreasing, 'max' when it stops increasing.
                 Default: 'min'
    :param factor: how much to reduce the LR each time, new_lr = old_lr * factor
    :param patience: how many epochs of no improvement to tolerate before
                     reducing the LR
    :param verbose: (bool) if True, print a message to stdout on each update.
                    Default: False
    :param threshold: (float) threshold for measuring a new best value; only
                      significant changes count. Default: 1e-4
    :param cooldown: number of epochs to wait after an LR reduction before
                     resuming normal operation. Default: 0
    :param min_lr: lower bound on the learning rate
    :param eps: minimal decay applied to the LR; if the difference between the
                old and new LR is smaller than eps, the update is ignored.
                Default: 1e-8
    (Translated from a CC 4.0 BY-SA CSDN post by 张叫张大卫:
    https://blog.csdn.net/weixin_40100431/article/details/84311430)
    '''
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           patience=3,
                                                           verbose=True)

    for epoch_num in range(train_epoch):
        epoch_loss = []

        for iter_num, data in enumerate(data_loader):
            iter_time = time.time()
            images, annots, scales = data
            if cuda:
                images = images.cuda()
                annots = annots.cuda()
                scales = scales.cuda()

            total_anchors = anchor(images)
            classification, localization = retinanet(images)

            cls_loss, loc_loss = \
                focal_loss(classification, localization, total_anchors, annots)
            loss = cls_loss + loc_loss
            epoch_loss.append(float(loss))

            optimizer.zero_grad()
            loss.backward()
            '''
            About torch.nn.utils.clip_grad_norm_():
            In some cases you may find that each layer of your net amplifies the
            gradient it receives. This causes a problem because the lower layers of
            the net then get huge gradients and their updates will be far too large
            to allow the model to learn anything.

            This function 'clips' the norm of the gradients by scaling the gradients
            down by the same amount in order to reduce the norm to an acceptable
            level. In practice this places a limit on the size of the parameter
            updates.

            The hope is that this will ensure that your model gets reasonably
            sized gradients and that the corresponding updates will allow the
            model to learn.
            (Quoted from https://discuss.pytorch.org/t/about-torch-nn-utils-clip-grad-norm/13873)
            '''
            torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
            optimizer.step()

            print('|', 'Epoch:', epoch_num + 1, '|', 'Iter:', iter_num + 1,
                  '|', 'cls loss:', float(cls_loss), '|', 'loc loss:',
                  float(loc_loss), '|', 'loss:', float(loss), '|', 'lr:',
                  optimizer.param_groups[0]['lr'], '|', 'time:',
                  time.time() - iter_time)

        scheduler.step(np.mean(epoch_loss))

        print('Saving parameters in model on epoch', epoch_num + 1)
        torch.save(
            retinanet.state_dict(),
            './param/param_epoch' + str(epoch_num + 1).zfill(3) + '.pkl')
Example No. 4
try:
    os.makedirs(opt.outf)
except OSError:
    pass

blue = lambda x: '\033[94m' + x + '\033[0m'

classifier = PointNetReg(num_points=opt.npoints)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.SGD(classifier.parameters(), lr=0.0001, momentum=0.9)
classifier.cuda()

focal_loss = FocalLoss(opt.gamma)
focal_loss = focal_loss.cuda()

num_batch = len(dataset) / opt.batchSize

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, _, target, _ = data
        points, target = Variable(points), Variable(target)
        points = points.transpose(2, 1)

        points, target = points.cuda(), target.cuda()
        optimizer.zero_grad()
        pred, _ = classifier(points)
        #pred = pred.view(-1, 3)
        #target = target.view(-1, 3)
        loss = F.mse_loss(pred, target)
        # the snippet is truncated here; presumably the step finishes with
        loss.backward()
        optimizer.step()
Example No. 5
def run(trainr, testdr, name, cls_num, idx):
    batch_size = 8
    data_loader, imagenet_data, new_lbs = load_dat(batch_size, trainr)

    model = se_resnext50_32x4d(num_classes=1000, pretrained=None)
    model.load_state_dict(
        torch.load('/home/dsl/all_check/se_resnext50_32x4d-a260b3a4.pth'),
        strict=False)
    model.fc1 = nn.Linear(2048, 4)
    model.fc2 = nn.Linear(4, cls_num)
    #model.load_state_dict(torch.load('/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/dsl/check/1009_res_total.pth'), strict=False)
    model.cuda()
    state = {'learning_rate': 0.01, 'momentum': 0.9, 'decay': 0.0005}
    optimizer = torch.optim.SGD(model.parameters(),
                                state['learning_rate'],
                                momentum=state['momentum'],
                                weight_decay=state['decay'],
                                nesterov=True)
    state['label_ix'] = imagenet_data.class_to_idx
    state['cls_name'] = name
    centerloss = CenterLoss(cls_num, 4)
    centerloss.cuda()
    optimzer_center = torch.optim.SGD(centerloss.parameters(), lr=0.3)
    state['best_accuracy'] = 0
    sch = lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
                                         mode='max',
                                         factor=0.5,
                                         patience=5)
    ll = len(data_loader.dataset)
    focal_loss = FocalLoss(gamma=2)
    focal_loss.cuda()
    state['train_accuracy'] = 0

    def train():
        model.train()
        loss_avg = 0.0
        progress = ProgressBar()
        ip1_loader = []
        idx_loader = []
        correct = 0
        for (data, target) in progress(data_loader):
            # stop at the last full batch so the batch size stays fixed
            if data.size(0) != batch_size:
                break
            data, target = torch.autograd.Variable(
                data.cuda()), torch.autograd.Variable(target.cuda())
            f1, output = model(data)
            pred = output.data.max(1)[1]
            correct += float(pred.eq(target.data).sum())

            optimizer.zero_grad()
            optimzer_center.zero_grad()

            loss = focal_loss(output, target) + centerloss(target, f1) * 0.3
            loss.backward()
            optimizer.step()
            optimzer_center.step()

            loss_avg = loss_avg * 0.2 + float(loss) * 0.8
            print(correct, ll, loss_avg)
        state['train_accuracy'] = correct / len(data_loader.dataset)
        state['train_loss'] = loss_avg

    def test():
        with torch.no_grad():
            model.eval()
            loss_avg = 0.0
            correct = 0
            for k in glob.glob(os.path.join(testdr,'*.jpg')):
                imag = Image.open(k)
                ig = data_transforms['val'](imag)
                ig = ig.unsqueeze(0)
                ig = torch.autograd.Variable(ig.cuda())
                f1, output = model(ig)
                output = F.softmax(output, dim=1)
                pred = output.data.squeeze(dim=0).cpu().numpy()
                score = np.asarray(pred)
                pred_lb = np.argmax(score)
                sc = np.max(score)
                print(k)
                lbs = new_lbs[pred_lb]
                if sc > 0.66:
                    shutil.copy(k, os.path.join(train_dr, lbs))
                else:
                    try:
                        nn_name = k.split('/')[-1]
                        os.remove(os.path.join(train_dr, lbs, nn_name))
                    except OSError:
                        pass

    best_accuracy = 0.0
    for epoch in range(100):
        state['epoch'] = epoch
        train()
        test()
        data_loader, imagenet_data, new_lbs = load_dat(batch_size, trainr)
        sch.step(state['train_accuracy'])

        if best_accuracy < state['train_accuracy']:
            best_accuracy = state['train_accuracy']
            state['best_accuracy'] = best_accuracy
            torch.save(
                model.state_dict(),
                os.path.join(
                    '/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/dsl/check',
                    idx + '.pth'))
        with open(
                os.path.join(
                    '/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/dsl/check',
                    idx + '.json'), 'w') as f:
            f.write(json.dumps(state))
            f.flush()
        print(state)
        print("Best accuracy: %f" % state['best_accuracy'])
        if best_accuracy == 1.0:
            break
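Note: CenterLoss is also external to this listing. A minimal sketch matching
the CenterLoss(num_classes, feat_dim) constructor and the centerloss(labels,
features) call order used above, in the spirit of Wen et al.'s center loss:

import torch
import torch.nn as nn

class CenterLoss(nn.Module):
    """Pulls each feature vector toward a learned per-class center."""

    def __init__(self, num_classes, feat_dim):
        super(CenterLoss, self).__init__()
        self.centers = nn.Parameter(torch.randn(num_classes, feat_dim))

    def forward(self, labels, features):
        # squared distance between each sample and its class center
        centers_batch = self.centers.index_select(0, labels.long())
        return ((features - centers_batch) ** 2).sum(dim=1).mean() / 2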
Example No. 6
def do_train(model_name,
             model,
             train_loader,
             val_loader,
             device,
             lr=0.0001,
             n_ep=40,
             num_classes=3,
             save_path='/tmp'):

    # Classifier = layer.MarginCosineProduct(1664, num_classes)
    # Classifier = Classifier.cuda()
    optimizer = torch.optim.Adam(model.parameters(),
                                 weight_decay=1e-5,
                                 lr=lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           verbose=True,
                                                           patience=2)
    criterion = nn.CrossEntropyLoss().cuda()
    focal_loss = FocalLoss(class_num=num_classes)
    focal_loss = focal_loss.cuda()
    grad_loss = Grad_loss().cuda()
    best_acc = 0.0
    # do training
    for i_ep in range(n_ep):
        model.train()
        train_losses = []
        widgets = [
            'train :',
            Percentage(), ' ',
            Bar('#'), ' ',
            Timer(), ' ',
            ETA(), ' ',
            FileTransferSpeed()
        ]
        pbar = ProgressBar(widgets=widgets)
        for batch_data in pbar(train_loader):
            image = batch_data['image'].type(torch.FloatTensor).to(device)
            label = batch_data['label_idx'].to(device)
            x = Variable(image, requires_grad=True)
            optimizer.zero_grad()
            logits = model(x)
            # logits = Classifier(logits,label)
            # loss = focal_loss(logits, label)#F.cross_entropy
            loss = criterion(logits, label)
            loss.backward()
            if np.random.randint(2):
                pertubation(x, image)
                model.zero_grad()
                logits = model(x)
                loss = criterion(logits, label)
                # loss += loss2
                loss.backward()
            optimizer.step()
            train_losses += [loss.detach().cpu().numpy().reshape(-1)]
        train_losses = np.concatenate(train_losses).reshape(-1).mean()

        model.eval()
        val_losses = []
        preds = []
        true_labels = []
        widgets = [
            'val:',
            Percentage(), ' ',
            Bar('#'), ' ',
            Timer(), ' ',
            ETA(), ' ',
            FileTransferSpeed()
        ]
        pbar = ProgressBar(widgets=widgets)
        for batch_data in pbar(val_loader):
            image = batch_data['image'].type(torch.FloatTensor).to(device)
            label = batch_data['label_idx'].to(device)
            # image = Variable(image, requires_grad=True)
            with torch.no_grad():
                logits = model(image)
            # logits = Classifier(logits,label)
            loss = criterion(logits, label).detach().cpu().numpy().reshape(-1)
            # loss2 = grad_loss(image)
            # loss += loss2.detach().cpu().numpy().reshape(-1)
            # loss = focal_loss(logits, label).detach().cpu().numpy().reshape(-1)
            val_losses += [loss]
            true_labels += [label.detach().cpu().numpy()]
            preds += [(logits.max(1)[1].detach().cpu().numpy())]

        preds = np.concatenate(preds, 0).reshape(-1)
        true_labels = np.concatenate(true_labels, 0).reshape(-1)
        acc = accuracy_score(true_labels, preds)
        val_losses = np.concatenate(val_losses).reshape(-1).mean()
        scheduler.step(val_losses)
        # need python3.6
        print(
            f'Epoch : {i_ep}  val_acc : {acc:.5%} ||| train_loss : {train_losses:.5f}  val_loss : {val_losses:.5f}  |||'
        )
        if acc > best_acc:
            best_acc = acc
            files2remove = glob.glob(os.path.join(save_path, 'ep_*'))
            for _i in files2remove:
                os.remove(_i)
            torch.save(
                model.cpu().state_dict(),
                os.path.join(save_path,
                             f'ep_{i_ep}_{model_name}_val_acc_{acc:.4f}.pth'))
            torch.save(
                model,
                os.path.join(save_path,
                             f'ep_{i_ep}_{model_name}_val_acc_{acc:.4f}.pkl'))
            model.to(device)
        elif i_ep % 10 == 9:
            torch.save(
                model.cpu().state_dict(),
                os.path.join(save_path,
                             f'ep_{i_ep}_{model_name}_val_acc_{acc:.4f}.pth'))
            torch.save(
                model,
                os.path.join(save_path,
                             f'ep_{i_ep}_{model_name}_val_acc_{acc:.4f}.pkl'))
            model.to(device)
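Note: the pertubation() helper called in the training loop above is not shown.
Since x is created with requires_grad=True and the call happens right after
loss.backward(), it appears to implement an FGSM-style adversarial step; a
sketch under that assumption (the eps value is made up here):

def pertubation(x, image, eps=1.0 / 255):
    # nudge the input along the sign of its gradient, staying within
    # +/- eps of the clean image, then clear the gradient for the re-forward
    x.data = image + eps * x.grad.data.sign()
    x.grad.data.zero_()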
Example No. 7
def run(train_sets, valid_sets, cls_num, idx):
    batch_size = 16
    train_gen = get_batch(batch_size=batch_size,
                          data_set=train_sets,
                          image_size=train_sets.image_size)
    valid_gen = get_batch(batch_size=1,
                          data_set=valid_sets,
                          image_size=train_sets.image_size)
    model = resnet18(num_classes=1000, pretrained=None)
    model.load_state_dict(torch.load('/home/dsl/all_check/resnet18-5c106cde.pth'),
                          strict=False)
    model.fc = nn.Linear(512, 1)
    model.cuda()
    state = {'learning_rate': 0.01, 'momentum': 0.9, 'decay': 0.0005}
    optimizer = torch.optim.SGD(model.parameters(), state['learning_rate'], momentum=state['momentum'],
                                weight_decay=state['decay'], nesterov=True)
    state['label_ix'] = train_sets.cls_map
    state['cls_name'] = idx
    centerloss = CenterLoss(cls_num, 2)
    centerloss.cuda()
    optimzer_center = torch.optim.SGD(centerloss.parameters(), lr=0.3)
    state['best_accuracy'] = 0
    sch = lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
                                         mode='max',
                                         factor=0.5,
                                         patience=5)
    ll = train_sets.len()
    focal_loss = FocalLoss(gamma=2)
    focal_loss.cuda()

    bc_loss = nn.BCEWithLogitsLoss()

    def train():
        model.train()
        loss_avg = 0.0
        progress = ProgressBar()
        ip1_loader = []
        idx_loader = []
        correct = 0
        for b in range(int(train_sets.len() / batch_size)):
            images, labels = next(train_gen)
            images = np.transpose(images, [0, 3, 1, 2])
            images = torch.from_numpy(images)
            labels = torch.from_numpy(labels).float()
            data, target = torch.autograd.Variable(
                images.cuda()), torch.autograd.Variable(labels.cuda())
            output = model(data)

            output = output.squeeze()
            ot = torch.sigmoid(output)
            pred = ot.ge(0.5).float()

            correct += float(pred.eq(target.data).sum())

            optimizer.zero_grad()

            loss = bc_loss(output, target)
            loss.backward()
            optimizer.step()

            loss_avg = loss_avg * 0.2 + float(loss) * 0.8
            print(correct, ll, loss_avg)
        state['train_accuracy'] = correct / train_sets.len()
        state['train_loss'] = loss_avg

    def test():
        with torch.no_grad():
            model.eval()
            loss_avg = 0.0
            correct = 0
            for i in range(valid_sets.len()):
                images, labels = next(valid_gen)
                images = np.transpose(images, [0, 3, 1, 2])
                images = torch.from_numpy(images)
                labels = torch.from_numpy(labels).float()
                data, target = torch.autograd.Variable(
                    images.cuda()), torch.autograd.Variable(labels.cuda())
                output = model(data)
                output = output.squeeze()
                ot = torch.sigmoid(output)
                pred = ot.ge(0.5).float()
                correct += float(pred.eq(target.data).sum())
                state['test_accuracy'] = correct / valid_sets.len()
            print(state['test_accuracy'])

    best_accuracy = 0.0
    for epoch in range(40):
        state['epoch'] = epoch
        train()
        test()
        sch.step(state['train_accuracy'])
        best_accuracy = (state['train_accuracy'] + state['test_accuracy']) / 2

        if best_accuracy > state['best_accuracy']:
            state['best_accuracy'] = best_accuracy
            torch.save(
                model.state_dict(),
                os.path.join(
                    '/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/AIChallenger2018/zuixin/be',
                    idx + '.pth'))
            with open(
                    os.path.join(
                        '/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/AIChallenger2018/zuixin/be',
                        idx + '.json'), 'w') as f:
                f.write(json.dumps(state))
                f.flush()
        print(state)
        print("Best accuracy: %f" % state['best_accuracy'])

        if state['train_accuracy'] - state['test_accuracy'] > 0.06 and epoch > 30:
            break
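Note: get_batch() and the train_sets/valid_sets objects are not in the
listing. Judging by the np.transpose(images, [0, 3, 1, 2]) calls, the
generator yields NumPy batches in NHWC layout; a sketch of that interface
(the data_set indexing and .len() method are assumptions):

import numpy as np

def get_batch(batch_size, data_set, image_size):
    # infinite generator of (images, labels); images are NHWC float32
    while True:
        idx = np.random.randint(0, data_set.len(), size=batch_size)
        images = np.zeros((batch_size, image_size, image_size, 3),
                          dtype=np.float32)
        labels = np.zeros(batch_size, dtype=np.float32)
        for i, j in enumerate(idx):
            images[i], labels[i] = data_set[j]
        yield images, labels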
Example No. 8
def run(trainr, name, cls_num, idx):

    imagenet_data = ImageFolder(trainr, transform=data_transforms['train'])
    data_loader = DataLoader(imagenet_data, batch_size=6, shuffle=True)
    model = inception_v3(num_classes=1000, pretrained=None, aux_logits=False)
    model.load_state_dict(torch.load(
        'D:/deep_learn_data/check/inception_v3_google-1a9a5a14.pth'),
                          strict=False)
    model.fc1 = nn.Linear(2048, 4)
    model.fc2 = nn.Linear(4, cls_num)
    #model.load_state_dict(torch.load('D:/deep_learn_data/luntai/check/1.pth'), strict=False)
    model.cuda()
    state = {'learning_rate': 0.01, 'momentum': 0.9, 'decay': 0.0005}
    optimizer = torch.optim.SGD(model.parameters(),
                                state['learning_rate'],
                                momentum=state['momentum'],
                                weight_decay=state['decay'],
                                nesterov=True)
    state['label_ix'] = imagenet_data.class_to_idx
    state['cls_name'] = name
    centerloss = CenterLoss(cls_num, 4)
    centerloss.cuda()
    optimzer_center = torch.optim.SGD(centerloss.parameters(), lr=0.3)
    state['best_accuracy'] = 0
    sch = lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
                                         mode='max',
                                         factor=0.5,
                                         patience=5)
    ll = len(data_loader.dataset)
    focal_loss = FocalLoss(gamma=2)
    focal_loss.cuda()

    def train():
        model.train()
        loss_avg = 0.0
        progress = ProgressBar()
        ip1_loader = []
        idx_loader = []
        correct = 0
        for (data, target) in progress(data_loader):
            # stop at the last full batch so the batch size stays fixed
            if data.size(0) != 6:
                break
            data, target = torch.autograd.Variable(
                data.cuda()), torch.autograd.Variable(target.cuda())
            f1, output = model(data)
            pred = output.data.max(1)[1]
            correct += float(pred.eq(target.data).sum())

            optimizer.zero_grad()
            optimzer_center.zero_grad()

            loss = focal_loss(output, target) + centerloss(target, f1) * 0.3
            loss.backward()
            optimizer.step()
            optimzer_center.step()

            ip1_loader.append(f1)
            idx_loader.append(target)

            loss_avg = loss_avg * 0.2 + float(loss) * 0.8
            print(correct, ll, loss_avg)

        state['train_accuracy'] = correct / len(data_loader.dataset)
        feat = torch.cat(ip1_loader, 0)
        labels = torch.cat(idx_loader, 0)
        visualize(feat.data.cpu().numpy(),
                  labels.data.cpu().numpy(), epoch, cls_num)
        state['train_loss'] = loss_avg

    best_accuracy = 0.0
    for epoch in range(100):
        state['epoch'] = epoch
        train()
        sch.step(state['train_accuracy'])

        if best_accuracy < state['train_accuracy']:
            best_accuracy = state['train_accuracy']
            state['best_accuracy'] = best_accuracy
            torch.save(model.state_dict(), os.path.join('./log', idx + '.pth'))
        with open(os.path.join('./log', idx + '.json'), 'w') as f:
            f.write(json.dumps(state))
            f.flush()
        print(state)
        print("Best accuracy: %f" % state['best_accuracy'])
        if best_accuracy == 1.0 or state['train_accuracy'] > 0.99:
            break
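Note: visualize() is another external helper. Given that it receives the fc1
features, the labels, the epoch, and the class count, it presumably
scatter-plots the embedding per class; a sketch that plots the first two
feature dimensions (the output path is an assumption):

import matplotlib
matplotlib.use('Agg')  # save to file; no display assumed
import matplotlib.pyplot as plt

def visualize(feat, labels, epoch, cls_num):
    plt.figure(figsize=(6, 6))
    for c in range(cls_num):
        mask = labels == c
        plt.scatter(feat[mask, 0], feat[mask, 1], s=4, label=str(c))
    plt.legend(loc='upper right')
    plt.title('epoch %d' % epoch)
    plt.savefig('./log/feat_epoch_%03d.png' % epoch)
    plt.close()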
Example No. 9
def run(trainr, name, cls_num, idx):

    imagenet_data = ImageFolder(trainr, transform=data_transforms['train'])
    data_loader = DataLoader(imagenet_data, batch_size=6, shuffle=True)
    model = inceptionv4(num_classes=1000, pretrained=None)
    model.avg_pool = nn.AvgPool2d(13, count_include_pad=False)
    # NB: this checkpoint is for se_resnext50_32x4d, so with strict=False
    # almost none of its weights will actually match inceptionv4
    model.load_state_dict(
        torch.load('D:/deep_learn_data/check/se_resnext50_32x4d-a260b3a4.pth'),
        strict=False)
    model.last_linear = nn.Linear(1536, cls_num)

    model.cuda()
    state = {'learning_rate': 0.01, 'momentum': 0.9, 'decay': 0.0005}
    optimizer = torch.optim.SGD(model.parameters(),
                                state['learning_rate'],
                                momentum=state['momentum'],
                                weight_decay=state['decay'],
                                nesterov=True)
    state['label_ix'] = imagenet_data.class_to_idx
    state['cls_name'] = name

    state['best_accuracy'] = 0
    sch = lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
                                         mode='max',
                                         factor=0.5,
                                         patience=5)
    ll = len(data_loader.dataset)
    focal_loss = FocalLoss(gamma=2)
    focal_loss.cuda()

    def train():
        model.train()
        loss_avg = 0.0
        progress = ProgressBar()
        ip1_loader = []
        idx_loader = []
        correct = 0
        for (data, target) in progress(data_loader):
            # stop at the last full batch so the batch size stays fixed
            if data.size(0) != 6:
                break
            data, target = torch.autograd.Variable(
                data.cuda()), torch.autograd.Variable(target.cuda())
            output = model(data)
            pred = output.data.max(1)[1]
            correct += float(pred.eq(target.data).sum())
            optimizer.zero_grad()
            loss = focal_loss(output, target)
            loss.backward()
            optimizer.step()
            loss_avg = loss_avg * 0.2 + float(loss) * 0.8
            print(correct, ll, loss_avg)

        state['train_accuracy'] = correct / len(data_loader.dataset)

        state['train_loss'] = loss_avg

    best_accuracy = 0.0
    for epoch in range(100):
        state['epoch'] = epoch
        train()
        sch.step(state['train_accuracy'])

        if best_accuracy < state['train_accuracy']:
            best_accuracy = state['train_accuracy']
            state['best_accuracy'] = best_accuracy
            torch.save(model.state_dict(), os.path.join('./log', idx + '.pth'))
        with open(os.path.join('./log', idx + '.json'), 'w') as f:
            f.write(json.dumps(state))
            f.flush()
        print(state)
        print("Best accuracy: %f" % state['best_accuracy'])
        if best_accuracy == 1.0 or state['train_accuracy'] > 0.98:
            break
Example No. 10
def run(trainr, test_dr, name, svdr):
    print(test_dr)
    batch_size = 16
    imagenet_data = ImageFolder(trainr, transform=data_transforms['train'])
    test_data = ImageFolder(test_dr, transform=data_transforms['val'])
    cls_num = len(imagenet_data.class_to_idx)

    data_loader = DataLoader(imagenet_data, batch_size=batch_size, shuffle=True)
    test_data_loader = DataLoader(test_data, batch_size=1, shuffle=True)

    model = se_resnext50_32x4d(num_classes=1000, pretrained=None)
    model.load_state_dict(
        torch.load('/home/dsl/all_check/se_resnext50_32x4d-a260b3a4.pth'),
        strict=False)
    model.fc1 = nn.Linear(2048, 2)
    model.fc2 = nn.Linear(2, cls_num)
    model.cuda()
    state = {'learning_rate': 0.01, 'momentum': 0.9, 'decay': 0.0005}
    optimizer = torch.optim.SGD(model.parameters(), state['learning_rate'], momentum=state['momentum'],
                                weight_decay=state['decay'], nesterov=True)
    state['label_ix'] = imagenet_data.class_to_idx
    state['cls_name'] = name
    centerloss = CenterLoss(cls_num, 2)
    centerloss.cuda()
    optimzer_center = torch.optim.SGD(centerloss.parameters(), lr=0.3)
    state['best_accuracy'] = 0
    sch = lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
                                         mode='max',
                                         factor=0.5,
                                         patience=5)
    ll = len(data_loader.dataset)
    focal_loss = FocalLoss(gamma=2)
    focal_loss.cuda()

    def train():
        model.train()
        loss_avg = 0.0
        progress = ProgressBar()
        ip1_loader = []
        idx_loader = []
        correct = 0
        for (data, target) in progress(data_loader):
            data, target = torch.autograd.Variable(
                data.cuda()), torch.autograd.Variable(target.cuda())
            f1, output = model(data)
            pred = output.data.max(1)[1]
            correct += float(pred.eq(target.data).sum())

            optimizer.zero_grad()
            optimzer_center.zero_grad()

            loss = focal_loss(output, target) + centerloss(target, f1) * 0.4
            loss.backward()
            optimizer.step()
            optimzer_center.step()

            ip1_loader.append(f1)
            idx_loader.append(target)

            loss_avg = loss_avg * 0.2 + float(loss) * 0.8
            print(correct, ll, loss_avg)

        state['train_accuracy'] = correct / len(data_loader.dataset)
        feat = torch.cat(ip1_loader, 0)
        labels = torch.cat(idx_loader, 0)
        state['train_loss'] = loss_avg

    def test():
        with torch.no_grad():
            model.eval()
            loss_avg = 0.0
            correct = 0
            for batch_idx, (data, target) in enumerate(test_data_loader):
                data, target = torch.autograd.Variable(
                    data.cuda()), torch.autograd.Variable(target.cuda())
                f1, output = model(data)
                loss = F.cross_entropy(output, target)
                pred = output.data.max(1)[1]
                correct += float(pred.eq(target.data).sum())
                loss_avg += float(loss)
                state['test_loss'] = loss_avg / len(test_data_loader)
                state['test_accuracy'] = correct / len(test_data_loader.dataset)
            print(state['test_accuracy'])

    best_accuracy = 0.0
    for epoch in range(50):
        state['epoch'] = epoch
        train()
        test()
        sch.step(state['train_accuracy'])

        best_accuracy = (state['train_accuracy'] + state['test_accuracy']) / 2

        if best_accuracy > state['best_accuracy']:
            state['best_accuracy'] = best_accuracy
            torch.save(model.state_dict(), os.path.join(svdr, name + '.pth'))
            with open(os.path.join(svdr, name + '.json'), 'w') as f:
                f.write(json.dumps(state))
                f.flush()
        print(state)
        print("Best accuracy: %f" % state['best_accuracy'])
        if best_accuracy == 1.0:
            break
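Note: none of these examples shows how run() is invoked. A hypothetical
driver for this last variant, training one model per subdirectory (the
directory layout and paths are assumptions):

import os

if __name__ == '__main__':
    train_root = './data/train'   # assumed: train_root/<name>/<class>/*.jpg
    test_root = './data/val'
    save_dir = './checkpoints'
    for name in sorted(os.listdir(train_root)):
        run(os.path.join(train_root, name),
            os.path.join(test_root, name),
            name,
            save_dir)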