Example #1
    def init_environment(self, args):
        # Decide which processor (CPU or GPU) to use.
        if not args.use_avai_gpus:
            os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
            os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices

        self.use_gpu = torch.cuda.is_available()
        if args.use_cpu:
            self.use_gpu = False

        # Start logger.
        self.ts = time.strftime("%Y-%m-%d_%H-%M-%S_")

        self.log_name = self.ts + ('test' if args.evaluate else 'train') + '.log'
        sys.stdout = Logger(osp.join(args.save_experiment, self.log_name))

        # Print out the arguments taken from Terminal (or defaults).
        print('==========\nArgs:{}\n=========='.format(args))

        print("Timestamp: " + self.ts)
        # Warn if not using GPU.
        if self.use_gpu:
            print('Currently using GPU {}'.format(args.gpu_devices))
            cudnn.benchmark = True
        else:
            warnings.warn(
                'Currently using CPU, however, GPU is highly recommended')

        if self.args.fix_seed:
            self.fix_seed()
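
Every example here redirects sys.stdout through a project-specific Logger so that everything printed also lands in a log file. A minimal sketch of what such a class must provide, assuming it simply tees output to the console and a file (the real projects ship their own implementations):

import os
import sys


class Logger(object):
    """Tee everything written to stdout into a log file as well (assumed behavior)."""

    def __init__(self, fpath=None):
        self.console = sys.stdout
        self.file = None
        if fpath is not None:
            log_dir = os.path.dirname(fpath)
            if log_dir:
                os.makedirs(log_dir, exist_ok=True)
            self.file = open(fpath, 'w')

    def write(self, msg):
        self.console.write(msg)
        if self.file is not None:
            self.file.write(msg)

    def flush(self):
        self.console.flush()
        if self.file is not None:
            self.file.flush()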
Example #2
def main(args):

    # log setting
    log_name = "log_test.log" if args.evaluate else "log_train.log"
    sys.stdout = Logger(osp.join(args.log_dir, log_name))

    # cuda setting
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_devices
    print("Currently using GPU {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))

    print("Building re-id model ")
    model = ResReid(args.num_classes)

    if args.load_weights:
        pretrain_models = flow.load(args.load_weights)
        model.load_state_dict(pretrain_models, strict=False)

    model = model.to("cuda")

    print("=> init dataset")
    dataset = Market1501(root=args.data_dir)

    if args.evaluate:
        evaluate(model, dataset)
    else:
        optimizer = flow.optim.Adam(
            model.parameters(),
            lr=args.lr,
            weight_decay=args.weight_decay,
            betas=(args.adam_beta1, args.adam_beta2),
        )

        # lr scheduler
        if args.warmup:
            scheduler = WarmupMultiStepLR(
                optimizer,
                milestones=args.step_size,
                gamma=args.gamma,
                warmup_factor=args.warmup_factor,
                warmup_iters=args.warmup_iters,
            )
        else:
            scheduler = flow.optim.lr_scheduler.LambdaLR(
                optimizer,
                lr_lambda=lambda epoch: args.lr**bisect_right(
                    args.step_size, epoch),
            )

        train(model, dataset, args.num_classes, optimizer, scheduler)
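
WarmupMultiStepLR is imported from the project, not defined here. Assuming it follows the usual warmup-then-multi-step pattern (linear ramp for the first few epochs, then decay by gamma at each milestone), the per-epoch multiplier it applies to the base learning rate can be sketched as a plain function; the names and default values below are assumptions, not the project's API:

from bisect import bisect_right


def warmup_multistep_factor(epoch, milestones=(40, 70), gamma=0.1,
                            warmup_factor=0.01, warmup_iters=10):
    # Linear ramp from warmup_factor up to 1.0 during the warmup period.
    if epoch < warmup_iters:
        alpha = epoch / warmup_iters
        warm = warmup_factor * (1 - alpha) + alpha
    else:
        warm = 1.0
    # Multiply by gamma once for every milestone already passed.
    return warm * gamma ** bisect_right(list(milestones), epoch)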
Example #3
File: tester.py  Project: hsfzxjy/WRSFKM
    def target(self, index):

        from init import init_uv
        from iterations import run

        X, C, labels = load_dataset(self.dataset)

        U, V = init_uv(X, C, Params(dict(**self.init_params, **self.mutual)))

        initial = {name: (U.copy(), V.copy()) for name in self.params}

        result = {}

        for name, param in self.params.items():
            p = Params({
                **param,
                **self.mutual, 'initial': initial[name],
                'init': 'preset',
                'C': C
            })

            dest = os.path.join(self.root_directory, name + '.h5.' +
                                str(index)) if self.root_directory else ''

            print('running', name)
            logger = Logger(dest)
            start_time = time()
            result = run(X, labels, p, logger)
            end_time = time()
            time_elapsed = end_time - start_time
            result = (*result, time_elapsed)
            print(name, result[2:])
            logger.log_final(*result)
            logger.close()

        return result
Example #4
def main():
    #GENERAL
    root = "/home/kuru/Desktop/veri-gms-master/"
    train_dir = '/home/kuru/Desktop/veri-gms-master/VeRispan/image_train/'
    source = {'veri'}
    target = {'veri'}
    workers = 2
    height = 320
    width = 320
    train_sampler = 'RandomSampler'

    #AUGMENTATION
    random_erase = True
    jitter = True
    aug = True

    #OPTIMIZATION
    opt = 'adam'
    lr = 0.0003
    weight_decay = 5e-4
    momentum = 0.9
    sgd_damp = 0.0
    nesterov = True
    warmup_factor = 0.01
    warmup_method = 'linear'

    #HYPERPARAMETER
    max_epoch = 80
    start = 0
    train_batch_size = 16
    test_batch_size = 50

    #SCHEDULER
    lr_scheduler = 'multi_step'
    stepsize = [30, 60]
    gamma = 0.1

    #LOSS
    margin = 0.3
    num_instances = 6
    lambda_tri = 1

    #MODEL
    arch = 'resnet101_ibn_a'
    no_pretrained = False

    #TEST SETTINGS
    load_weights = '/home/kuru/Desktop/veri-gms-master/IBN-Net_pytorch0.4.1/resnet101_ibn_a.pth'
    #load_weights = None
    start_eval = 0
    eval_freq = -1

    #MISC
    use_gpu = True
    use_amp = True
    print_freq = 50
    seed = 1
    resume = ''
    save_dir = '/home/kuru/Desktop/veri-gms-master/logapex/'
    gpu_id = 0
    vis_rank = True
    query_remove = True
    evaluate = False

    dataset_kwargs = {
        'source_names': source,
        'target_names': target,
        'root': root,
        'height': height,
        'width': width,
        'train_batch_size': train_batch_size,
        'test_batch_size': test_batch_size,
        'train_sampler': train_sampler,
        'random_erase': random_erase,
        'color_jitter': jitter,
        'color_aug': aug
    }
    transform_kwargs = {
        'height': height,
        'width': width,
        'random_erase': random_erase,
        'color_jitter': jitter,
        'color_aug': aug
    }

    optimizer_kwargs = {
        'optim': opt,
        'lr': lr,
        'weight_decay': weight_decay,
        'momentum': momentum,
        'sgd_dampening': sgd_damp,
        'sgd_nesterov': nesterov
    }

    lr_scheduler_kwargs = {
        'lr_scheduler': lr_scheduler,
        'stepsize': stepsize,
        'gamma': gamma
    }

    use_gpu = torch.cuda.is_available()
    log_name = 'log_test.txt' if evaluate else 'log_train.txt'
    sys.stdout = Logger(osp.join(save_dir, log_name))
    print('Currently using GPU ', gpu_id)
    cudnn.benchmark = True

    print('Initializing image data manager')
    dataset = init_imgreid_dataset(root='/home/kuru/Desktop/veri-gms-master/',
                                   name='veri')
    train = []
    num_train_pids = 0
    num_train_cams = 0

    for img_path, pid, camid in dataset.train:
        path = img_path[52:77]
        #print(path)
        folder = path[1:4]
        pid += num_train_pids
        camid += num_train_cams
        train.append((path, folder, pid, camid))

    num_train_pids += dataset.num_train_pids
    num_train_cams += dataset.num_train_cams

    pid = 0
    pidx = {}
    for img_path, pid, camid in dataset.train:
        path = img_path[52:77]

        folder = path[1:4]
        pidx[folder] = pid
        pid += 1

    path = '/home/kuru/Desktop/veri-gms-master/gms/'
    pkl = {}
    entries = os.listdir(path)
    for name in entries:
        # key is 'featureMatrix' for the feature matrix file, otherwise the 3-char label prefix
        s = name[0:13] if name == 'featureMatrix.pkl' else name[0:3]
        with open(path + name, 'rb') as f:
            pkl[s] = pickle.load(f)

    transform_t = train_transforms(**transform_kwargs)

    data_tfr = vd(
        pkl_file='index.pkl',
        dataset=train,
        root_dir='/home/kuru/Desktop/veri-gms-master/VeRi/image_train/',
        transform=transform_t)
    trainloader = DataLoader(data_tfr,
                             sampler=None,
                             batch_size=train_batch_size,
                             shuffle=True,
                             num_workers=workers,
                             pin_memory=False,
                             drop_last=True)

    #data_tfr = vd(pkl_file='index.pkl', dataset = train, root_dir=train_dir,transform=transforms.Compose([Rescale(64),RandomCrop(32),ToTensor()]))
    #dataloader = DataLoader(data_tfr, batch_size=batch_size, shuffle=False, num_workers=0)

    print('Initializing test data manager')
    dm = ImageDataManager(use_gpu, **dataset_kwargs)
    testloader_dict = dm.return_dataloaders()

    print('Initializing model: {}'.format(arch))
    model = models.init_model(name=arch,
                              num_classes=num_train_pids,
                              loss={'xent', 'htri'},
                              last_stride=1,
                              pretrained=not no_pretrained,
                              use_gpu=use_gpu)
    print('Model size: {:.3f} M'.format(count_num_param(model)))

    if load_weights is not None:
        print("weights loaded")
        load_pretrained_weights(model, load_weights)

    model = model.cuda() if use_gpu else model

    #model = nn.DataParallel(model).cuda() if use_gpu else model
    optimizer = init_optimizer(model, **optimizer_kwargs)
    #optimizer = init_optimizer(model)

    model, optimizer = amp.initialize(model,
                                      optimizer,
                                      opt_level="O2",
                                      keep_batchnorm_fp32=True,
                                      loss_scale="dynamic")
    model = nn.DataParallel(model).cuda() if use_gpu else model
    scheduler = init_lr_scheduler(optimizer, **lr_scheduler_kwargs)

    criterion_xent = CrossEntropyLoss(num_classes=num_train_pids,
                                      use_gpu=use_gpu,
                                      label_smooth=True)
    criterion_htri = TripletLoss(margin=margin)
    ranking_loss = nn.MarginRankingLoss(margin=margin)

    if evaluate:
        print('Evaluate only')

        for name in target:
            print('Evaluating {} ...'.format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            _, distmat = test(model,
                              queryloader,
                              galleryloader,
                              train_batch_size,
                              use_gpu,
                              return_distmat=True)

            if vis_rank:
                visualize_ranked_results(distmat,
                                         dm.return_testdataset_by_name(name),
                                         save_dir=osp.join(
                                             save_dir, 'ranked_results', name),
                                         topk=20)
        return

    time_start = time.time()
    ranklogger = RankLogger(source, target)
    print('=> Start training')

    data_index = search(pkl)

    for epoch in range(start, max_epoch):
        losses = AverageMeter()
        #xent_losses = AverageMeter()
        htri_losses = AverageMeter()
        accs = AverageMeter()
        batch_time = AverageMeter()

        model.train()
        for p in model.parameters():
            p.requires_grad = True  # open all layers

        end = time.time()
        for batch_idx, (img, label, index, pid, cid) in enumerate(trainloader):
            trainX, trainY = torch.zeros(
                (train_batch_size * 3, 3, height, width),
                dtype=torch.float32), torch.zeros((train_batch_size * 3),
                                                  dtype=torch.int64)
            #pids = torch.zeros((batch_size*3), dtype = torch.int16)
            for i in range(train_batch_size):

                labelx = str(label[i])
                indexx = int(index[i])
                cidx = int(pid[i])
                if indexx > len(pkl[labelx]) - 1:
                    indexx = len(pkl[labelx]) - 1

                #maxx = np.argmax(pkl[labelx][indexx])
                a = pkl[labelx][indexx]
                minpos = np.argmax(ma.masked_where(a == 0, a))
                pos_dic = data_tfr[data_index[cidx][1] + minpos]

                # sample a random negative identity different from the anchor's label
                neg_label = int(labelx)
                while True:
                    neg_label = random.choice(range(1, 770))
                    if neg_label != int(labelx) and os.path.isdir(
                            os.path.join(
                                '/home/kuru/Desktop/adiusb/veri-split/train',
                                strint(neg_label))):
                        break
                negative_label = strint(neg_label)
                neg_cid = pidx[negative_label]
                neg_index = random.choice(range(0, len(pkl[negative_label])))

                neg_dic = data_tfr[data_index[neg_cid][1] + neg_index]
                trainX[i] = img[i]
                trainX[i + train_batch_size] = pos_dic[0]
                trainX[i + (train_batch_size * 2)] = neg_dic[0]
                trainY[i] = cidx
                trainY[i + train_batch_size] = pos_dic[3]
                trainY[i + (train_batch_size * 2)] = neg_dic[3]

                #print("anc",labelx,'posdic', pos_dic[1],pos_dic[2],'negdic', neg_dic[1],neg_dic[2])

            trainX = trainX.cuda()
            trainY = trainY.cuda()
            outputs, features = model(trainX)
            xent_loss = criterion_xent(outputs[0:train_batch_size],
                                       trainY[0:train_batch_size])
            htri_loss = criterion_htri(features, trainY)

            #tri_loss = ranking_loss(features)
            #ent_loss = xent_loss(outputs[0:batch_size], trainY[0:batch_size], num_train_pids)

            loss = htri_loss + xent_loss
            optimizer.zero_grad()

            if use_amp:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            #loss.backward()
            optimizer.step()

            batch_time.update(time.time() - end)
            losses.update(loss.item(), trainY.size(0))
            htri_losses.update(htri_loss.item(), trainY.size(0))
            accs.update(
                accuracy(outputs[0:train_batch_size],
                         trainY[0:train_batch_size])[0])

            if (batch_idx) % print_freq == 0:
                print('Train ', end=" ")
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format(
                          epoch + 1,
                          batch_idx + 1,
                          len(trainloader),
                          batch_time=batch_time,
                          loss=htri_losses,
                          acc=accs))

            end = time.time()

        scheduler.step()
        print('=> Test')

        for name in target:
            print('Evaluating {} ...'.format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            rank1, distmat = test(model, queryloader, galleryloader,
                                  test_batch_size, use_gpu)
            ranklogger.write(name, epoch + 1, rank1)
            rank2, distmat2 = test_rerank(model, queryloader, galleryloader,
                                          test_batch_size, use_gpu)
            ranklogger.write(name, epoch + 1, rank2)
        del queryloader
        del galleryloader
        del distmat
        #print(torch.cuda.memory_allocated(),torch.cuda.memory_cached())
        torch.cuda.empty_cache()

        if (epoch + 1) == max_epoch:
            #if (epoch + 1) % 10 == 0:
            print('=> Test')
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'rank1': rank1,
                    'epoch': epoch + 1,
                    'arch': arch,
                    'optimizer': optimizer.state_dict(),
                }, save_dir)

            if vis_rank:
                visualize_ranked_results(distmat,
                                         dm.return_testdataset_by_name(name),
                                         save_dir=osp.join(
                                             save_dir, 'ranked_results', name),
                                         topk=20)
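
Examples #4, #7 and #8 track losses and accuracy with an AverageMeter helper (update(val, n), .val, .avg). A minimal sketch, assuming the standard running-average implementation rather than the project's exact one:

class AverageMeter(object):
    """Keep the latest value and a running average (assumed behavior)."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count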
Example #5
            log.logger.error("PARSE THE PAGE ERROR AND ERROR MESSAGE: " + traceback.format_exc())

    def start(self):
        base_url = 'http://fund.eastmoney.com/data/FundGuideapi.aspx?dt=0&ft={}&sd=&ed=&sc=z&st=desc' \
                   '&pi={}&pn=20&zf=diy&sh=list'
        fund_type_list = ["zq", "zs", "gp", "hh"]
        for fund_type in fund_type_list:
            current_page = 1
            while True:
                try:
                    text = self.connect_to_url(base_url.format(fund_type, current_page))
                except ConnectionError:
                    log.logger.error("CONNECT TO URL ERROR AND URL: " + base_url.format(fund_type, current_page))
                    break
                try:
                    page_flag = self.parse(text)
                    if page_flag is False:
                        break
                    else:
                        current_page += 1
                        time.sleep(random.uniform(4, 6))
                except Exception:  # avoid a bare except; stop paging this fund type on parse errors
                    break
            time.sleep(random.uniform(5, 8))


if __name__ == '__main__':
    log_day = time.strftime("%Y-%m-%d", time.localtime())
    log = Logger(abspath + '/logs/GetFundInfo_' + log_day + '.log', level='info')
    GetAllBondFundCrawler().start()
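
Note that the Logger in Examples #5 and #9 is a different helper from the stdout tee used elsewhere: it takes a file path and a level and exposes a .logger attribute with the standard logging methods. A minimal sketch, assuming it wraps the logging module (not the project's actual implementation):

import logging


class Logger(object):
    def __init__(self, filename, level='info'):
        self.logger = logging.getLogger(filename)
        self.logger.setLevel(getattr(logging, level.upper(), logging.INFO))
        handler = logging.FileHandler(filename, encoding='utf-8')
        handler.setFormatter(logging.Formatter(
            '%(asctime)s - %(levelname)s: %(message)s'))
        self.logger.addHandler(handler)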
Example #6
if reweighting:
    ext_f = "arm"
elif with_attribute:
    ext_f = "attribute"
else:
    ext_f = ""  # fallback so ext_f is always defined for output_folder below
output_folder = "{}{}_{}".format(backbone_name,
                                 CONFIG['MODEL']['ARCH'],
                                 ext_f)

save_dir = osp.join('./logs', CONFIG['DATASET']['NAME'], output_folder)
checkpoint = osp.join('./checkpoints',
                      CONFIG['DATASET']['NAME'], output_folder)

mkdir_if_missing(save_dir)
mkdir_if_missing(checkpoint)
log_name = f"train_{time.strftime('-%Y-%m-%d-%H-%M-%S')}.log"
sys.stdout = Logger(osp.join(save_dir, log_name))

timestamp = time.strftime("0:%Y-%m-%dT%H-%M-%S")
summary_writer = SummaryWriter(osp.join(save_dir, 'tensorboard_log' + timestamp))

if __name__ == "__main__":

    train_loader, val_loader, class_names, attrs = get_data(dataset=CONFIG['DATASET']['NAME'],
                                                            root=CONFIG['DATASET']['ROOT'],
                                                            train_folder=CONFIG['DATASET']['TRAIN'],
                                                            val_folder=CONFIG['DATASET']['VAL'],
                                                            ten_crops=CONFIG['TESTING']['TEN_CROPS'],
                                                            batch_size=CONFIG['TRAINING']['BATCH_SIZE'],
                                                            with_attribute=with_attribute)

    print('checkpoint dir {}'.format(checkpoint))
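
mkdir_if_missing is a small project utility. Assuming it only has to create the directory tree when it does not exist yet, it can be sketched as:

import os


def mkdir_if_missing(directory):
    if not os.path.exists(directory):
        os.makedirs(directory, exist_ok=True)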
Example #7
def main():
    #GENERAL
    torch.cuda.empty_cache()
    root = "/home/kuru/Desktop/veri-gms-master/"
    train_dir = '/home/kuru/Desktop/veri-gms-master/VeRispan/image_train/'
    source = {'verispan'}
    target = {'verispan'}
    workers = 4
    height = 320
    width = 320
    train_sampler = 'RandomSampler'

    #AUGMENTATION
    random_erase = True
    jitter = True
    aug = True

    #OPTIMIZATION
    opt = 'adam'
    lr = 0.001
    weight_decay = 5e-4
    momentum = 0.9
    sgd_damp = 0.0
    nesterov = True
    warmup_factor = 0.01
    warmup_method = 'linear'

    STEPS = (30, 60)
    GAMMA = 0.1
    WARMUP_FACTOR = 0.01
    WARMUP_EPOCHS = 10
    WARMUP_METHOD = 'linear'

    #HYPERPARAMETER
    max_epoch = 80
    start = 0
    train_batch_size = 16
    test_batch_size = 50

    #SCHEDULER
    lr_scheduler = 'multi_step'
    stepsize = [30, 60]
    gamma = 0.1

    #LOSS
    margin = 0.3
    num_instances = 4
    lambda_tri = 1

    #MODEL
    #arch = 'resnet101'
    arch = 'resnet50_ibn_a'
    no_pretrained = False

    #TEST SETTINGS
    #load_weights = '/home/kuru/Desktop/veri-gms-master/IBN-Net_pytorch0.4.1/resnet101_ibn_a.pth'

    #load_weights = '/home/kuru/Desktop/veri-gms-master/IBN-Net_pytorch0.4.1/resnet101_ibn_a.pth'
    load_weights = '/home/kuru/Desktop/veri-gms-master/IBN-Net_pytorch0.4.1/resnet50_ibn_a.pth'

    #load_weights = None
    start_eval = 0
    eval_freq = -1

    num_classes = 776
    feat_dim = 2048
    CENTER_LR = 0.5
    CENTER_LOSS_WEIGHT = 0.0005
    center_criterion = CenterLoss(num_classes=num_classes,
                                  feat_dim=feat_dim,
                                  use_gpu=True)
    optimizer_center = torch.optim.SGD(center_criterion.parameters(),
                                       lr=CENTER_LR)

    #MISC
    use_gpu = True
    #use_gpu = False
    print_freq = 10
    seed = 1
    resume = ''
    save_dir = '/home/kuru/Desktop/veri-gms-master_noise/spanningtree_veri_pure/'
    gpu_id = 0, 1
    vis_rank = True
    query_remove = True
    evaluate = False

    dataset_kwargs = {
        'source_names': source,
        'target_names': target,
        'root': root,
        'height': height,
        'width': width,
        'train_batch_size': train_batch_size,
        'test_batch_size': test_batch_size,
        'train_sampler': train_sampler,
        'random_erase': random_erase,
        'color_jitter': jitter,
        'color_aug': aug
    }
    transform_kwargs = {
        'height': height,
        'width': width,
        'random_erase': random_erase,
        'color_jitter': jitter,
        'color_aug': aug
    }

    optimizer_kwargs = {
        'optim': opt,
        'lr': lr,
        'weight_decay': weight_decay,
        'momentum': momentum,
        'sgd_dampening': sgd_damp,
        'sgd_nesterov': nesterov
    }

    lr_scheduler_kwargs = {
        'lr_scheduler': lr_scheduler,
        'stepsize': stepsize,
        'gamma': gamma
    }

    use_gpu = torch.cuda.is_available()

    log_name = 'log_test.txt' if evaluate else 'log_train.txt'
    sys.stdout = Logger(osp.join(save_dir, log_name))
    print('Currently using GPU ', gpu_id)
    cudnn.benchmark = True

    print('Initializing image data manager')
    #dataset = init_imgreid_dataset(root='/home/kuru/Desktop/veri-gms-master/', name='veri')
    dataset = init_imgreid_dataset(root='/home/kuru/Desktop/veri-gms-master/',
                                   name='verispan')
    train = []
    num_train_pids = 0
    num_train_cams = 0
    print(len(dataset.train))

    for img_path, pid, camid, subid, countid in dataset.train:
        #print(img_path)
        path = img_path[56:90 + 6]
        #print(path)
        folder = path[1:4]
        #print(folder)
        #print(img_path, pid, camid,subid,countid)
        pid += num_train_pids
        camid += num_train_cams
        newidd = 0
        train.append((path, folder, pid, camid, subid, countid))
        #print(train)
        #break

    num_train_pids += dataset.num_train_pids
    num_train_cams += dataset.num_train_cams

    pid = 0
    pidx = {}
    for img_path, pid, camid, subid, countid in dataset.train:
        path = img_path[56:90 + 6]

        folder = path[1:4]
        pidx[folder] = pid
        pid += 1
    #print(pidx)

    sub = []
    final = 0
    xx = dataset.train
    newids = []
    print(train[0:2])
    train2 = {}
    for k in range(0, 770):
        for img_path, pid, camid, subid, countid in dataset.train:
            if k == pid:
                newid = final + subid
                sub.append(newid)
                #print(pid,subid,newid)
                newids.append(newid)
                train2[img_path] = newid
                #print(img_path, pid, camid, subid, countid, newid)

        final = max(sub)
        #print(final)
    print(len(newids), final)

    #train=train2
    #print(train2)
    train3 = []
    for img_path, pid, camid, subid, countid in dataset.train:
        #print(img_path,pid,train2[img_path])
        path = img_path[56:90 + 6]
        #print(path)
        folder = path[1:4]
        newid = train2[img_path]
        #print((path, folder, pid, camid, subid, countid,newid ))
        train3.append((path, folder, pid, camid, subid, countid, newid))

    train = train3

    # for (path, folder, pid, camid, subid, countid,newid) in train:
    #     print(path, folder)

    #path = '/home/kuru/Desktop/adhi/veri-final-draft-master_noise/gmsNoise776/'
    path = '/home/kuru/Desktop/veri-gms-master/gms/'
    pkl = {}
    #pkl[0] = pickle.load('/home/kuru/Desktop/veri-gms-master/gms/620.pkl')

    entries = os.listdir(path)
    for name in entries:
        # key is 'featureMatrix' for the feature matrix file, otherwise the 3-char label prefix
        s = name[0:13] if name == 'featureMatrix.pkl' else name[0:3]
        with open(path + name, 'rb') as f:
            pkl[s] = pickle.load(f)

    print('=> pickle indexing')

    data_index = search(pkl)
    print(len(data_index))

    transform_t = train_transforms(**transform_kwargs)
    #print(train[0],train[10])

    #data_tfr = vd(pkl_file='index.pkl', dataset = train, root_dir='/home/kuru/Desktop/veri-gms-master/VeRi/image_train/', transform=transform_t)
    data_tfr = vdspan(
        pkl_file='index_veryspan.pkl',
        dataset=train,
        root_dir='/home/kuru/Desktop/veri-gms-master/VeRispan/image_train/',
        transform=transform_t)
    #print(data_tfr)
    #print(trainloader)
    #data_tfr2=list(data_tfr)
    print("lllllllllllllllllllllllllllllllllllllllllllline 433")
    df2 = []
    data_tfr_old = data_tfr
    for (img, label, index, pid, cid, subid, countid, newid) in data_tfr:
        #print((img,label,index,pid, cid,subid,countid,newid) )
        #print("datframe",(label))
        #print(countid)
        if countid > 4:
            #print(countid)
            df2.append((img, label, index, pid, cid, subid, countid, newid))
    print("filtered final trainset length", len(df2))

    data_tfr = df2

    # with open('df2noise_ex.pkl', 'wb') as handle:
    #     b = pickle.dump(df2, handle, protocol=pickle.HIGHEST_PROTOCOL)

    # with open('df2noise.pkl', 'rb') as handle:
    #     df2 = pickle.load(handle)
    # data_tfr=df2
    # for (img,label,index,pid, cid,subid,countid,newid) in data_tfr :
    #     print("datframe",(label))

    #data_tfr = vdspansort( dataset = train, root_dir='/home/kuru/Desktop/veri-gms-master_noise/VeRispan/image_train/', transform=transform_t)

    #trainloader = DataLoader(df2, sampler=None,batch_size=train_batch_size, shuffle=True, num_workers=workers,pin_memory=True, drop_last=True)
    trainloader = DataLoader(data_tfr,
                             sampler=None,
                             batch_size=train_batch_size,
                             shuffle=True,
                             num_workers=workers,
                             pin_memory=True,
                             drop_last=True)

    for batch_idx, (img, label, index, pid, cid, subid, countid,
                    newid) in enumerate(trainloader):
        #print("trainloader",batch_idx, (label,index,pid, cid,subid,countid,newid))
        print("trainloader", batch_idx, (label))
        break

    print('Initializing test data manager')
    dm = ImageDataManager(use_gpu, **dataset_kwargs)
    testloader_dict = dm.return_dataloaders()

    print('Initializing model: {}'.format(arch))
    model = models.init_model(name=arch,
                              num_classes=num_train_pids,
                              loss={'xent', 'htri'},
                              pretrained=not no_pretrained,
                              last_stride=2)
    print('Model size: {:.3f} M'.format(count_num_param(model)))

    if load_weights is not None:
        print("weights loaded")
        load_pretrained_weights(model, load_weights)

    #checkpoint = torch.load('/home/kuru/Desktop/veri-gms-master/logg/model.pth.tar-19')
    #model._load_from_state_dict(checkpoint['state_dict'])
    #model.load_state_dict(checkpoint['state_dict'])

    #optimizer.load_state_dict(checkpoint['optimizer'])
    #print(checkpoint['epoch'])
    #print(checkpoint['rank1'])
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    print(torch.cuda.device_count())
    model = nn.DataParallel(model).cuda() if use_gpu else model
    optimizer = init_optimizer(model, **optimizer_kwargs)

    #optimizer = init_optimizer(model)
    #optimizer.load_state_dict(checkpoint['optimizer'])

    scheduler = init_lr_scheduler(optimizer, **lr_scheduler_kwargs)
    # scheduler = WarmupMultiStepLR(optimizer, STEPS, GAMMA,
    #                               WARMUP_FACTOR,
    #                               WARMUP_EPOCHS, WARMUP_METHOD)

    criterion_xent = CrossEntropyLoss(num_classes=num_train_pids,
                                      use_gpu=use_gpu,
                                      label_smooth=True)
    criterion_htri = TripletLoss(margin=margin)
    ranking_loss = nn.MarginRankingLoss(margin=margin)

    if evaluate:
        print('Evaluate only')

        for name in target:
            print('Evaluating {} ...'.format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            _, distmat = test(model,
                              queryloader,
                              galleryloader,
                              train_batch_size,
                              use_gpu,
                              return_distmat=True)

            if vis_rank:
                visualize_ranked_results(distmat,
                                         dm.return_testdataset_by_name(name),
                                         save_dir=osp.join(
                                             save_dir, 'ranked_results', name),
                                         topk=20)
        return

    time_start = time.time()
    ranklogger = RankLogger(source, target)

    # # checkpoint = torch.load('/home/kuru/Desktop/market_all/ibna_model/model.pth.tar-79')
    # # model.load_state_dict(checkpoint['state_dict'])
    # # optimizer.load_state_dict(checkpoint['optimizer'])
    # # print(checkpoint['epoch'])
    # # start_epoch=checkpoint['epoch']
    # # start=start_epoch

    # checkpoint = torch.load('/home/kuru/Desktop/veri-gms-master/spanningtreeveri/model.pth.tar-2')
    # model.load_state_dict(checkpoint['state_dict'])
    # optimizer.load_state_dict(checkpoint['optimizer'])
    # print(checkpoint['epoch'])
    # start_epoch=checkpoint['epoch']
    # start=start_epoch

    ##start_epoch=resume_from_checkpoint('/home/kuru/Desktop/veri-gms-master/logg/model.pth.tar-20', model, optimizer=None)
    print('=> Start training')

    for epoch in range(start, max_epoch):
        print(epoch, scheduler.get_lr()[0])
        #print( torch.cuda.memory_allocated(0))
        losses = AverageMeter()
        #xent_losses = AverageMeter()
        htri_losses = AverageMeter()
        accs = AverageMeter()
        batch_time = AverageMeter()
        xent_losses = AverageMeter()

        model.train()
        for p in model.parameters():
            p.requires_grad = True  # open all layers

        end = time.time()
        for batch_idx, (img, label, index, pid, cid, subid, countid,
                        newid) in enumerate(trainloader):
            trainX, trainY = torch.zeros(
                (train_batch_size * 3, 3, height, width),
                dtype=torch.float32), torch.zeros((train_batch_size * 3),
                                                  dtype=torch.int64)
            #pids = torch.zeros((batch_size*3), dtype = torch.int16)
            #batchcount=0
            for i in range(train_batch_size):
                if (countid[i] > 4):
                    #batchcount=batchcount+1
                    #print("dfdsfs")
                    labelx = label[i]
                    indexx = index[i]
                    cidx = pid[i]
                    if indexx > len(pkl[labelx]) - 1:
                        indexx = len(pkl[labelx]) - 1

                    #maxx = np.argmax(pkl[labelx][indexx])
                    a = pkl[labelx][indexx]
                    minpos = np.argmin(ma.masked_where(a == 0, a))

                    # print(len(a))
                    # print(a)
                    # print(ma.masked_where(a==0, a))
                    # print(labelx,index,pid,cidx,minpos)
                    # print(np.array(data_index).shape)
                    # print(data_index[cidx][1])
                    pos_dic = data_tfr_old[data_index[cidx][1] + minpos]
                    #print('posdic', pos_dic)

                    # sample a random negative identity different from the anchor's label
                    neg_label = int(labelx)
                    while True:
                        neg_label = random.choice(range(1, 770))
                        if neg_label != int(labelx) and os.path.isdir(
                                os.path.join(
                                    '/home/kuru/Desktop/veri-gms-master_noise/veriNoise_train_spanning_folder',
                                    strint(neg_label))):
                            break
                    negative_label = strint(neg_label)
                    neg_cid = pidx[negative_label]
                    neg_index = random.choice(
                        range(0, len(pkl[negative_label])))
                    #print(negative_label,neg_cid,neg_index,data_index[neg_cid] )
                    neg_dic = data_tfr_old[data_index[neg_cid][1] + neg_index]
                    #print('negdic', neg_dic)
                    trainX[i] = img[i]
                    trainX[i + train_batch_size] = pos_dic[0]
                    trainX[i + (train_batch_size * 2)] = neg_dic[0]
                    trainY[i] = cidx
                    trainY[i + train_batch_size] = pos_dic[3]
                    trainY[i + (train_batch_size * 2)] = neg_dic[3]
                    # trainY[i+train_batch_size] = pos_dic[7]
                    # trainY[i+(train_batch_size*2)] = neg_dic[7]
                #break
                # else:
                #     print("skiped",countid[i],subid[i],label[i])
            #break
            #print(batchcount)
            trainX = trainX.cuda()
            trainY = trainY.cuda()
            outputs, features = model(trainX)
            xent_loss = criterion_xent(outputs[0:train_batch_size],
                                       trainY[0:train_batch_size])
            htri_loss = criterion_htri(features, trainY)
            centerloss = CENTER_LOSS_WEIGHT * center_criterion(
                features, trainY)

            #tri_loss = ranking_loss(features)
            #ent_loss = xent_loss(outputs[0:batch_size], trainY[0:batch_size], num_train_pids)

            loss = htri_loss + xent_loss + centerloss
            loss = htri_loss + xent_loss  # overrides the line above, so the center loss term is effectively unused

            optimizer.zero_grad()
            optimizer_center.zero_grad()
            loss.backward()
            optimizer.step()
            # for param in center_criterion.parameters():
            #     param.grad.data *= (1. /CENTER_LOSS_WEIGHT)
            # optimizer_center.step()

            for param_group in optimizer.param_groups:
                #print(param_group['lr'] )
                lrrr = str(param_group['lr'])

            batch_time.update(time.time() - end)
            losses.update(loss.item(), trainY.size(0))
            htri_losses.update(htri_loss.item(), trainY.size(0))
            xent_losses.update(xent_loss.item(), trainY.size(0))
            accs.update(
                accuracy(outputs[0:train_batch_size],
                         trainY[0:train_batch_size])[0])

            if (batch_idx) % 50 == 0:
                print('Train ', end=" ")
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'TriLoss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'XLoss {xloss.val:.4f} ({xloss.avg:.4f})\t'
                      'OveralLoss {oloss.val:.4f} ({oloss.avg:.4f})\t'
                      'Acc {acc.val:.2f} ({acc.avg:.2f})\t'
                      'lr {lrrr} \t'.format(
                          epoch + 1,
                          batch_idx + 1,
                          len(trainloader),
                          batch_time=batch_time,
                          loss=htri_losses,
                          xloss=xent_losses,
                          oloss=losses,
                          acc=accs,
                          lrrr=lrrr,
                      ))

            end = time.time()

        # del loss
        # del htri_loss
        # del xent_loss
        # del htri_losses
        # del losses
        # del outputs
        # del features
        # del accs
        # del trainX
        # del trainY

        scheduler.step()
        print('=> Test')
        save_checkpoint(
            {
                'state_dict': model.state_dict(),
                #'rank1': rank1,
                'epoch': epoch + 1,
                'arch': arch,
                'optimizer': optimizer.state_dict(),
            },
            save_dir)
        GPUtil.showUtilization()
        print(torch.cuda.memory_allocated(), torch.cuda.memory_cached())
        for name in target:
            print('Evaluating {} ...'.format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            rank1, distmat = test(model, queryloader, galleryloader,
                                  test_batch_size, use_gpu)
            ranklogger.write(name, epoch + 1, rank1)
            rank2, distmat2 = test_rerank(model, queryloader, galleryloader,
                                          test_batch_size, use_gpu)
            ranklogger.write(name, epoch + 1, rank2)
        del queryloader
        del galleryloader
        del distmat
        print(torch.cuda.memory_allocated(), torch.cuda.memory_cached())
        torch.cuda.empty_cache()

        if (epoch + 1) == max_epoch:
            #if (epoch + 1) % 10 == 0:
            print('=> Test')
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'rank1': rank1,
                    'epoch': epoch + 1,
                    'arch': arch,
                    'optimizer': optimizer.state_dict(),
                }, save_dir)
            for name in target:
                print('Evaluating {} ...'.format(name))
                queryloader = testloader_dict[name]['query']
                galleryloader = testloader_dict[name]['gallery']
                rank1, distmat = test(model, queryloader, galleryloader,
                                      test_batch_size, use_gpu)
                ranklogger.write(name, epoch + 1, rank1)
                # del queryloader
                # del galleryloader
                # del distmat

                if vis_rank:
                    visualize_ranked_results(
                        distmat,
                        dm.return_testdataset_by_name(name),
                        save_dir=osp.join(save_dir, 'ranked_results', name),
                        topk=20)
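
The accuracy helper used in Examples #4, #7 and #8 returns a sequence of top-k accuracies, of which only element [0] (top-1) is consumed. A minimal sketch under that assumption:

import torch


def accuracy(output, target, topk=(1,)):
    """Compute top-k classification accuracy as percentages (assumed behavior)."""
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res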
Example #8
def main():
    #GENERAL
    torch.cuda.empty_cache()

    root = "/home/kuru/Desktop/veri-gms-master_noise/"
    train_dir = '/home/kuru/Desktop/veri-gms-master_noise/VeRispan/image_train/'
    source = {'verispan'}
    target = {'verispan'}
    workers = 4
    height = 280
    width  = 280
    train_size = 32
    train_sampler = 'RandomSampler'

    #AUGMENTATION
    random_erase = True
    jitter = True
    aug = True

    #OPTIMIZATION
    opt = 'adam'
    lr = 0.0003
    weight_decay = 5e-4
    momentum = 0.9
    sgd_damp = 0.0
    nesterov = True
    warmup_factor = 0.01
    warmup_method = 'linear'

    #HYPERPARAMETER
    max_epoch = 80
    start = 0
    train_batch_size = 8
    test_batch_size = 100

    #SCHEDULER
    lr_scheduler = 'multi_step'
    stepsize = [30, 60]
    gamma = 0.1

    #LOSS
    margin = 0.3
    num_instances = 4
    lambda_tri = 1

    #MODEL
    #arch = 'resnet101'
    arch='resnet101_ibn_a'
    no_pretrained = False

    #TEST SETTINGS
    load_weights = '/home/kuru/Desktop/veri-gms-master/IBN-Net_pytorch0.4.1/resnet101_ibn_a.pth'
    #load_weights = None
    start_eval = 0
    eval_freq = -1

    #MISC
    use_gpu = True
    print_freq = 10
    seed = 1
    resume = ''
    save_dir = '/home/kuru/Desktop/veri-gms-master_noise/spanningtree_verinoise_101_stride2/'
    gpu_id = 0,1
    vis_rank = True
    query_remove = True
    evaluate = False

    dataset_kwargs = {
        'source_names': source,
        'target_names': target,
        'root': root,
        'height': height,
        'width': width,
        'train_batch_size': train_batch_size,
        'test_batch_size': test_batch_size,
        'train_sampler': train_sampler,
        'random_erase': random_erase,
        'color_jitter': jitter,
        'color_aug': aug
        }
    transform_kwargs = {
        'height': height,
        'width': width,
        'random_erase': random_erase,
        'color_jitter': jitter,
        'color_aug': aug
    }

    optimizer_kwargs = {
        'optim': opt,
        'lr': lr,
        'weight_decay': weight_decay,
        'momentum': momentum,
        'sgd_dampening': sgd_damp,
        'sgd_nesterov': nesterov
        }

    lr_scheduler_kwargs = {
        'lr_scheduler': lr_scheduler,
        'stepsize': stepsize,
        'gamma': gamma
        }
    
    use_gpu = torch.cuda.is_available()
    log_name = 'log_test.txt' if evaluate else 'log_train.txt'
    sys.stdout = Logger(osp.join(save_dir, log_name))
    print('Currently using GPU ', gpu_id)
    cudnn.benchmark = True

    print('Initializing image data manager')
    dataset = init_imgreid_dataset(root='/home/kuru/Desktop/veri-gms-master_noise/', name='verispan')
    train = []
    num_train_pids = 0
    num_train_cams = 0

    print(len( dataset.train))

    for img_path, pid, camid, subid, countid in dataset.train:
        #print(img_path)
        path = img_path[56+6:90+6]
        #print(path)
        folder = path[1:4]
        #print(folder)
        pid += num_train_pids
        newidd=0
        train.append((path, folder, pid, camid,subid,countid))

    num_train_pids += dataset.num_train_pids
    num_train_cams += dataset.num_train_cams

    pid = 0
    pidx = {}
    for img_path, pid, camid, subid, countid in dataset.train:
        path = img_path[56+6:90+6]
        
        folder = path[1:4]
        pidx[folder] = pid
        pid+= 1

    sub=[]
    final=0
    xx=dataset.train
    newids=[]
    print(train[0:2])
    train2={}
    for k in range(0,770):
        for img_path, pid, camid, subid, countid in dataset.train:
            if k==pid:
                newid=final+subid
                sub.append(newid)
                #print(pid,subid,newid)
                newids.append(newid)
                train2[img_path]= newid
                #print(img_path, pid, camid, subid, countid, newid)

                

        final=max(sub)
        #print(final)
    print(len(newids),final)

    #train=train2
    #print(train2)
    train3=[]
    for img_path, pid, camid, subid, countid in dataset.train:
        #print(img_path,pid,train2[img_path])
        path = img_path[56:90+6]
        #print(path)
        folder = path[1:4]
        newid=train2[img_path]
        #print((path, folder, pid, camid, subid, countid,newid ))
        train3.append((path, folder, pid, camid, subid, countid,newid ))

    train = train3

    
    path = '/home/kuru/Desktop/adhi/veri-final-draft-master_noise/gmsNoise776/'
    pkl = {}
    #pkl[0] = pickle.load('/home/kuru/Desktop/veri-gms-master/gms/620.pkl')

    entries = os.listdir(path)
    for name in entries:
        # key is 'featureMatrix' for the feature matrix file, otherwise the 3-char label prefix
        s = name[0:13] if name == 'featureMatrix.pkl' else name[0:3]
        with open(path + name, 'rb') as f:
            pkl[s] = pickle.load(f)

    with open('cids.pkl', 'rb') as handle:
        b = pickle.load(handle)
        #print(b)

    with open('index.pkl', 'rb') as handle:
        c = pickle.load(handle)



    transform_t = train_transforms(**transform_kwargs)

    data_tfr = vdspan(pkl_file='index_veryspan_noise.pkl', dataset = train, root_dir='/home/kuru/Desktop/veri-gms-master_noise/VeRispan/image_train/', transform=transform_t)
    print("lllllllllllllllllllllllllllllllllllllllllllline 433")
    df2=[]
    data_tfr_old=data_tfr
    for (img,label,index,pid, cid,subid,countid,newid) in data_tfr :
        #print((img,label,index,pid, cid,subid,countid,newid) )
        #print("datframe",(label))
        #print(countid)
        if countid > 4 :
            #print(countid)
            df2.append((img,label,index,pid, cid,subid,countid,newid))
    print("filtered final trainset length",len(df2))
    
    data_tfr = df2

    trainloader = DataLoader(data_tfr, sampler=None,batch_size=train_batch_size, shuffle=True, num_workers=workers,pin_memory=True, drop_last=True)

    #data_tfr = vd(pkl_file='index.pkl', dataset = train, root_dir=train_dir,transform=transforms.Compose([Rescale(64),RandomCrop(32),ToTensor()]))
    #dataloader = DataLoader(data_tfr, batch_size=batch_size, shuffle=False, num_workers=0)

    for batch_idx, (img,label,index,pid, cid,subid,countid,newid) in enumerate(trainloader):
        #print("trainloader",batch_idx, (label,index,pid, cid,subid,countid,newid))
        print("trainloader",batch_idx, (label))
        break

    print('Initializing test data manager')
    dm = ImageDataManager(use_gpu, **dataset_kwargs)
    testloader_dict = dm.return_dataloaders()

    print('Initializing model: {}'.format(arch))
    model = models.init_model(name=arch, num_classes=num_train_pids, loss={'xent', 'htri'},
                              pretrained=not no_pretrained, last_stride =2 )
    print('Model size: {:.3f} M'.format(count_num_param(model)))

    if load_weights is not None:
        print("weights loaded")
        load_pretrained_weights(model, load_weights)

    print(torch.cuda.device_count())
    model = nn.DataParallel(model).cuda() if use_gpu else model
    optimizer = init_optimizer(model, **optimizer_kwargs)
    #optimizer = init_optimizer(model)
    
    scheduler = init_lr_scheduler(optimizer, **lr_scheduler_kwargs)

    criterion_xent = CrossEntropyLoss(num_classes=num_train_pids, use_gpu=use_gpu, label_smooth=True)
    criterion_htri = TripletLoss(margin=margin)
    ranking_loss = nn.MarginRankingLoss(margin = margin)

    if evaluate:
        print('Evaluate only')

        for name in target:
            print('Evaluating {} ...'.format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            _, distmat = test(model, queryloader, galleryloader, train_batch_size, use_gpu, return_distmat=True)

            if vis_rank:
                visualize_ranked_results(
                    distmat, dm.return_testdataset_by_name(name),
                    save_dir=osp.join(save_dir, 'ranked_results', name),
                    topk=20
                )
        return    

    time_start = time.time()
    ranklogger = RankLogger(source, target)
    print('=> Start training')

    data_index = search(pkl)
    print(len(data_index))
    
    for epoch in range(start, max_epoch):
        losses = AverageMeter()
        #xent_losses = AverageMeter()
        htri_losses = AverageMeter()
        accs = AverageMeter()
        batch_time = AverageMeter()
        xent_losses=AverageMeter()

        model.train()
        for p in model.parameters():
            p.requires_grad = True    # open all layers

        end = time.time()
        for batch_idx,  (img,label,index,pid, cid,subid,countid,newid)  in enumerate(trainloader):
            trainX, trainY = torch.zeros((train_batch_size*3,3,height, width), dtype=torch.float32), torch.zeros((train_batch_size*3), dtype = torch.int64)
            #pids = torch.zeros((batch_size*3), dtype = torch.int16)
            for i in range(train_batch_size):
                #print("dfdsfs")
                labelx = label[i]
                indexx = index[i]
                cidx = pid[i]
                if indexx >len(pkl[labelx])-1:
                    indexx = len(pkl[labelx])-1

                #maxx = np.argmax(pkl[labelx][indexx])
                a = pkl[labelx][indexx]
                minpos = np.argmin(ma.masked_where(a==0, a)) 
                #print(minpos)
                #print(np.array(data_index).shape)
                #print(data_index[cidx][1])
                pos_dic = data_tfr_old[data_index[cidx][1]+minpos]

                # sample a random negative identity different from the anchor's label
                neg_label = int(labelx)
                while True:
                    neg_label = random.choice(range(1, 770))
                    if neg_label != int(labelx) and os.path.isdir(
                            os.path.join('/home/kuru/Desktop/adiusb/veri-split/train', strint(neg_label))):
                        break
                negative_label = strint(neg_label)
                neg_cid = pidx[negative_label]
                neg_index = random.choice(range(0, len(pkl[negative_label])))

                neg_dic = data_tfr_old[data_index[neg_cid][1]+neg_index]
                trainX[i] = img[i]
                trainX[i+train_batch_size] = pos_dic[0]
                trainX[i+(train_batch_size*2)] = neg_dic[0]
                trainY[i] = cidx
                trainY[i+train_batch_size] = pos_dic[3]
                trainY[i+(train_batch_size*2)] = neg_dic[3]
            
            trainX = trainX.cuda()
            trainY = trainY.cuda()
            outputs, features = model(trainX)
            xent_loss = criterion_xent(outputs[0:train_batch_size], trainY[0:train_batch_size])
            htri_loss = criterion_htri(features, trainY)

            #tri_loss = ranking_loss(features)
            #ent_loss = xent_loss(outputs[0:batch_size], trainY[0:batch_size], num_train_pids)
            
            loss = htri_loss+xent_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # lrrr is referenced in the progress print below; read the current learning rate here
            for param_group in optimizer.param_groups:
                lrrr = str(param_group['lr'])

            batch_time.update(time.time() - end)
            losses.update(loss.item(), trainY.size(0))
            htri_losses.update(htri_loss.item(), trainY.size(0))
            xent_losses.update(xent_loss.item(), trainY.size(0))
            accs.update(accuracy(outputs[0:train_batch_size], trainY[0:train_batch_size])[0])
    
            if (batch_idx) % 50 == 0:
                print('Train ', end=" ")
                print('Epoch: [{0}][{1}/{2}]\t'
                    'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                    'TriLoss {loss.val:.4f} ({loss.avg:.4f})\t'
                    'XLoss {xloss.val:.4f} ({xloss.avg:.4f})\t'
                    'OveralLoss {oloss.val:.4f} ({oloss.avg:.4f})\t'
                    'Acc {acc.val:.2f} ({acc.avg:.2f})\t'
                    'lr {lrrr} \t'.format(
                    epoch + 1, batch_idx + 1, len(trainloader),
                    batch_time=batch_time,
                    loss = htri_losses,
                    xloss = xent_losses,
                    oloss = losses,
                    acc=accs ,
                    lrrr=lrrr,
                ))
                

            end = time.time()

        
        scheduler.step()            
        print('=> Test')

        for name in target:
            print('Evaluating {} ...'.format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            rank1, distmat = test(model, queryloader, galleryloader, test_batch_size, use_gpu)
            ranklogger.write(name, epoch + 1, rank1)
            rank2, distmat2 = test_rerank(model, queryloader, galleryloader, test_batch_size, use_gpu)
            ranklogger.write(name, epoch + 1, rank2)
            
        #if (epoch + 1) == max_epoch:
        if (epoch + 1) % 2 == 0:
            print('=> Test')

            for name in target:
                print('Evaluating {} ...'.format(name))
                queryloader = testloader_dict[name]['query']
                galleryloader = testloader_dict[name]['gallery']
                rank1, distmat = test(model, queryloader, galleryloader, test_batch_size, use_gpu)
                ranklogger.write(name, epoch + 1, rank1)

                # if vis_rank:
                #     visualize_ranked_results(
                #         distmat, dm.return_testdataset_by_name(name),
                #         save_dir=osp.join(save_dir, 'ranked_results', name),
                #         topk=20)

            save_checkpoint({
                'state_dict': model.state_dict(),
                'rank1': rank1,
                'epoch': epoch + 1,
                'arch': arch,
                'optimizer': optimizer.state_dict(),
            }, save_dir)
Example #9
        return content

    def start(self):
        base_url = 'http://data.eastmoney.com/kzz/default.html'
        try:
            text = self.connect_url(base_url)
            try:
                useful_bond_map = self.parse(text)
                content = self.get_text_content(useful_bond_map)
            except (KeyError, AttributeError, JSONDecodeError):
                log.logger.error("There is an error:\n" + text +
                                 "\n and Error Message: " +
                                 traceback.format_exc())
                content = "Maybe There Are Some Mistakes Here"
        except ConnectionError:
            content = "Maybe There Are Some Mistakes Here"

        SendEmailByGoogleMail(
            subject="今日可转债申购/上市情况",
            username="******",
            password="",
            receivers=['*****@*****.**'],
        ).send_mail(way='common', content=content, files=None)


if __name__ == '__main__':
    fetch_day = time.strftime("%Y-%m-%d", time.localtime())
    log = Logger(abspath + '/logs/Monitor' + fetch_day + '.log', level='info')
    requests.packages.urllib3.disable_warnings()
    GetConvertibleBondInfo().start()
Example #10
def main():
    global args
    global best_prec1, best_prec5
    global checkpoint_fold, checkpoint_best


    sys.stdout = Logger(osp.join(checkpoint_fold, 'log_train.txt'))

    writer = SummaryWriter('save_model/{}/{}/{}'.format(args.dataset, args.arch, args.checkpoints))
    # simple args
    debug = args.debug
    if debug: cprint('=> WARN: Debug Mode', 'yellow')

    dataset = args.dataset


    if dataset == 'cub':
        num_classes = 200
        base_size = 512
        #batch_size = 60
        batch_size = 48
        crop_size = 448
        pool_size = 14
        args.warmup = True
        pretrain = args.pretrained
        args.backbone = 'resnet'
        args.arch = '50'

        epochs = 100
        eval_freq = 5
        args.lr = 0.01
        lr_drop_epoch_list = [31, 61, 81]
    elif dataset == 'cifar10':
        num_classes = 10
        base_size = 32
        batch_size = 128
        crop_size = 32
        args.warmup = True
        pretrain = args.pretrained
        #args.backbone = 'resnet'
        epochs = 300
        eval_freq = 5
        args.lr = 0.1
        lr_drop_epoch_list = [150, 250]

        if args.backbone == 'preresnet':
            pool_size = 8
        else:
            pool_size = 4

    elif dataset == 'cifar100':
        num_classes = 100
        base_size = 32
        batch_size = 128
        crop_size = 32
        args.warmup = True
        pretrain = args.pretrained
        epochs = 300
        eval_freq = 5
        args.lr = 0.1
        lr_drop_epoch_list = [150, 250]

        if args.backbone == 'preresnet':
            pool_size = 8
        else:
            pool_size = 4

    
    else: ##imagenet
        num_classes = 1000
        base_size = 256
        batch_size = 100
        crop_size = 224
        pool_size = 7
        args.warmup = True
        pretrain = args.pretrained
        args.backbone = 'resnet'

        epochs = 100
        eval_freq = 5
        args.lr = 0.01
        lr_drop_epoch_list = [31, 61, 81]
    
    workers = 4

    if debug:
        batch_size = 2
        workers = 0


    if base_size == 512 and \
        args.arch == '152':
        batch_size = 128
    drop_ratio = 0.1
    gpu_ids = [0,1]

    # args for the nl and cgnl block
    arch = args.arch
    nl_type  = args.nl_type # 'cgnl' | 'cgnlx' | 'nl'
    nl_nums  = args.nl_nums # 1: stage res4

    # warmup setting
    WARMUP_LRS = [args.lr * (drop_ratio**len(lr_drop_epoch_list)), args.lr]
    WARMUP_EPOCHS = 10

    # data loader
    if dataset == 'cub':
        data_root = os.path.join(args.data_dir, 'cub')
        imgs_fold = os.path.join(data_root, 'images')
        train_ann_file = os.path.join(data_root, 'cub_train.list')
        valid_ann_file = os.path.join(data_root, 'cub_val.list')
    elif dataset == 'imagenet':
        data_root = '/home/sheqi/lei/dataset/imagenet'
        imgs_fold = os.path.join(data_root)
        train_ann_file = os.path.join(data_root, 'imagenet_train.list')
        valid_ann_file = os.path.join(data_root, 'imagenet_val.list')
    elif dataset == 'cifar10':
        print("cifar10")
    elif dataset == 'cifar100':
        print("cifar100")
    else:
        raise NameError("The dataset '{}' is not supported yet.".format(dataset))

    if dataset in ('cub', 'imagenet'):
        train_dataset = dataloader.ImgLoader(
                root = imgs_fold,
                ann_file = train_ann_file,
                transform = transforms.Compose([
                    transforms.RandomResizedCrop(
                        size=crop_size, scale=(0.08, 1.25)),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        [0.485, 0.456, 0.406],
                        [0.229, 0.224, 0.225])
                    ]))

        val_dataset = dataloader.ImgLoader(
                root = imgs_fold,
                ann_file = valid_ann_file,
                transform = transforms.Compose([
                    transforms.Resize(base_size),
                    transforms.CenterCrop(crop_size),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        [0.485, 0.456, 0.406],
                        [0.229, 0.224, 0.225])
                    ]))

        train_loader = torch.utils.data.DataLoader(
                train_dataset,
                batch_size = batch_size,
                shuffle = True,
                num_workers = workers,
                pin_memory = True)

        val_loader = torch.utils.data.DataLoader(
                val_dataset,
                batch_size = batch_size,
                shuffle = False,
                num_workers = workers,
                pin_memory = True)

    elif dataset == 'cifar10':
        train_transform = transforms.Compose([
                    transforms.RandomCrop(crop_size, padding=4),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        [0.4914, 0.4822, 0.4465],
                        [0.2023, 0.1994, 0.2010])
                    ])
        val_transform = transforms.Compose([
                    #transforms.Resize(base_size),
                    #transforms.CenterCrop(crop_size),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        [0.4914, 0.4822, 0.4465],
                        [0.2023, 0.1994, 0.2010])
                    ])
        trainset = torchvision.datasets.CIFAR10(root=args.data_dir, train=True, download=False, transform=train_transform)
        train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=workers, pin_memory=True)
        testset = torchvision.datasets.CIFAR10(root=args.data_dir, train=False, download=False, transform=val_transform)
        val_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=workers, pin_memory=True)
    elif dataset == 'cifar100':
        train_transform = transforms.Compose([
                    transforms.RandomCrop(crop_size, padding=4),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        [0.4914, 0.4822, 0.4465],
                        [0.2023, 0.1994, 0.2010])
                    ])
        val_transform = transforms.Compose([
                    #transforms.Resize(base_size),
                    #transforms.CenterCrop(crop_size),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        [0.4914, 0.4822, 0.4465],
                        [0.2023, 0.1994, 0.2010])
                    ])
        trainset = torchvision.datasets.CIFAR100(root=args.data_dir, train=True, download=False, transform=train_transform)
        train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=workers, pin_memory=True)
        testset = torchvision.datasets.CIFAR100(root=args.data_dir, train=False, download=False, transform=val_transform)
        val_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=workers, pin_memory=True)
    # build model

#####################################################
    if args.backbone == 'resnet':
        model = resnet_snl.model_hub(arch,
                                 pretrained=pretrain,
                                 nl_type=nl_type,
                                 nl_nums=nl_nums,
                                 stage_num=args.stage_nums,
                                 pool_size=pool_size, div=args.div, isrelu=args.relu)
    elif args.backbone == 'preresnet':
        model = preresnet_snl.model_hub(arch,
                                 pretrained=pretrain,
                                 nl_type=nl_type,
                                 nl_nums=nl_nums,
                                 stage_num=args.stage_nums,
                                 pool_size=pool_size, 
                                 div=args.div,
                                 nl_layer = args.nl_layer,
                                 relu = args.relu)
    else:
        raise KeyError("Unsupported backbone: {}".format(args.backbone))
####################################################


    # change the first conv for CIFAR
    if dataset in ('cifar10', 'cifar100'):
        model._modules['conv1'] = torch.nn.Conv2d(3, 64, kernel_size=3, stride=1,
                                                  padding=1, bias=False)
        model._modules['maxpool'] = torch.nn.Sequential()

    # change the fc layer
    if dataset != 'imagenet':
        model._modules['fc'] = torch.nn.Linear(in_features=2048,
                                           out_features=num_classes)
        torch.nn.init.kaiming_normal_(model._modules['fc'].weight,
                                  mode='fan_out', nonlinearity='relu')
    print(model)

    # parallel
    if args.num_gpu > 1:
        model = torch.nn.DataParallel(model, device_ids=gpu_ids).cuda()
    else:
        model = model.cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    # optimizer
    optimizer = torch.optim.SGD(
            model.parameters(),
            args.lr,
            momentum=0.9,
            weight_decay=1e-4)

    # cudnn
    cudnn.benchmark = True

    # warmup
    if args.warmup:
        epochs += WARMUP_EPOCHS
        lr_drop_epoch_list = list(
                np.array(lr_drop_epoch_list) + WARMUP_EPOCHS)
        cprint('=> WARN: warmup is used in the first {} epochs'.format(
            WARMUP_EPOCHS), 'yellow')


    start_epoch = 0
    if args.isresume:
        # NOTE: resume_path is not defined in this snippet; it is presumably set
        # elsewhere (e.g. from a command-line argument).
        print('loading checkpoint {}'.format(resume_path))
        checkpoint = torch.load(resume_path)
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch']
        best_prec1 = checkpoint['best_prec1']
        print("epoches: {}, best_prec1: {}". format(start_epoch, best_prec1 ))


    # valid
    if args.valid:
        cprint('=> WARN: Validation Mode', 'yellow')
        print('start validation ...')
        print('=> loading state_dict from {}'.format(args.check_path))
        model.load_state_dict(
                torch.load(args.check_path)['state_dict'], strict=True)
        prec1, prec5 = validate(val_loader, model, criterion)
        print(' * Final Accuracy: Prec@1 {:.3f}, Prec@5 {:.3f}'.format(prec1, prec5))
        exit(0)

    # train
    print('start training ...')
    for epoch in range(start_epoch, epochs):
        current_lr = adjust_learning_rate(optimizer, drop_ratio, epoch, lr_drop_epoch_list,
                                          WARMUP_EPOCHS, WARMUP_LRS)
        # train one epoch
        cur_loss = train(train_loader, model, criterion, optimizer, epoch, epochs, current_lr)
        writer.add_scalar("Train Loss", cur_loss, epoch + 1)

        if nl_nums > 0:
            checkpoint_name = '{}-{}-r-{}-w-{}{}-block.pth.tar'.format(epoch, dataset, arch, nl_nums, nl_type)
        else:
            checkpoint_name = '{}-r-{}-{}-base.pth.tar'.format(dataset, arch, epoch)

        checkpoint_name = os.path.join(checkpoint_fold, checkpoint_name)

        if (epoch + 1) % eval_freq == 0:
            prec1, prec5 = validate(val_loader, model, criterion)
##########################################################
            writer.add_scalar("Top1", prec1, epoch + 1)
            writer.add_scalar("Top5", prec5, epoch + 1)
##########################################################
            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            best_prec5 = max(prec5, best_prec5)
            print(' * Best accuracy: Prec@1 {:.3f}, Prec@5 {:.3f}'.format(best_prec1, best_prec5))
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer' : optimizer.state_dict(),
            }, is_best, filename=checkpoint_name)
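

# adjust_learning_rate(), train(), validate() and save_checkpoint() are used
# above but defined elsewhere. The sketch below shows one plausible
# adjust_learning_rate() matching the call signature above, assuming a linear
# warmup from WARMUP_LRS[0] (the fully decayed LR) to WARMUP_LRS[1] (the base LR)
# over the first WARMUP_EPOCHS epochs, followed by a step decay by drop_ratio at
# every milestone in lr_drop_epoch_list. The real implementation may differ.
from bisect import bisect_right


def adjust_learning_rate(optimizer, drop_ratio, epoch, lr_drop_epoch_list,
                         warmup_epochs, warmup_lrs):
    start_lr, base_lr = warmup_lrs
    if epoch < warmup_epochs:
        # Linearly ramp the learning rate up to the base value.
        lr = start_lr + (base_lr - start_lr) * epoch / max(1, warmup_epochs)
    else:
        # Decay by drop_ratio once for every milestone already passed
        # (the milestones were shifted by WARMUP_EPOCHS in main()).
        lr = base_lr * (drop_ratio ** bisect_right(lr_drop_epoch_list, epoch))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr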
示例#11
0
                    log.logger.error("Connect to url Error: " + url)
                    continue
                if name == "xici":
                    self.parse_xc(text)
                else:
                    self.parse_kd(text)
                time.sleep(random.uniform(3, 4))

        self.db_client.close()

    def delete_error_ip(self):
        """每天首先删除库中没用的ip地址"""
        select_result = self.db_client.select_ip()
        num_count, all_count = 0, 0
        for temp in select_result:
            all_count += 1
            ip_status = self.test_ip_useful(temp[0], temp[1])
            if ip_status is False:
                self.db_client.delete_ip(temp[0], temp[1])
                num_count += 1
        log.logger.info("Delete Form table Success nums: " + str(num_count) + " And All IP nums: " + str(all_count))


if __name__ == '__main__':
    log_day = time.strftime("%Y-%m-%d", time.localtime())
    log = Logger(abspath + '/logs/GetIPProxies' + log_day + '.log', level='info')
    # First, delete the dead IPs from the database
    GetIPProxies().delete_error_ip()
    # Then add newly verified working IPs to the database
    GetIPProxies().start()
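

# test_ip_useful() is called in delete_error_ip() above but is not included in
# this snippet. A minimal sketch of what such a check might do, assuming it
# simply issues a lightweight HTTP request through the candidate proxy and
# reports whether it succeeded. The probe URL and timeout are illustrative only,
# and in the original code this is a method on GetIPProxies rather than a
# standalone function.
import requests


def test_ip_useful(ip, port, timeout=5):
    proxies = {
        'http': 'http://{}:{}'.format(ip, port),
        'https': 'http://{}:{}'.format(ip, port),
    }
    try:
        resp = requests.get('http://httpbin.org/ip',
                            proxies=proxies, timeout=timeout, verify=False)
        return resp.status_code == 200
    except requests.RequestException:
        return False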
示例#12
0
@File :basepage.py
@Email:[email protected]
"""
import json
import os
import allure
import yaml
from time import sleep
from typing import Dict, List
from appium.webdriver.common.mobileby import MobileBy
from appium.webdriver.webdriver import WebDriver
from selenium.webdriver.common.by import By
from utils import setting
from utils.loggers import Logger

log = Logger()
shot_path = setting.SCREEN_SHOT


class BasePage:
    _params = {}
    _error_count = 0
    _error_max = 10
    _black_list = [(By.XPATH, "//*[@text='关闭']"),
                   (By.XPATH, "//*[@resource-id='com.tencent.wework:id/bpc']")]

    def __init__(self, driver: WebDriver = None):
        self.driver = driver

    def finds(self, by, locator=None):
        elements = self.driver.find_elements(by, locator)
        return elements
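
    # The attributes _black_list / _error_count / _error_max defined above are
    # normally consumed by a find() that dismisses known pop-ups and retries.
    # That method is not part of this snippet; the version below is a hedged
    # sketch of the usual pattern, not the original implementation.
    def find(self, by, locator=None):
        try:
            element = self.driver.find_element(by, locator)
            self._error_count = 0
            return element
        except Exception:
            self._error_count += 1
            if self._error_count > self._error_max:
                self._error_count = 0
                raise
            # Click away any known blocking pop-up from the black list, then retry.
            for black_by, black_locator in self._black_list:
                popups = self.driver.find_elements(black_by, black_locator)
                if popups:
                    popups[0].click()
            return self.find(by, locator)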