Example #1
import sys

def main():
    opts.parse_args()
    # print(opts.resolved)
    if opts.resolved.verbose: log("massaging input...")
    p = process(opts.inputs)
    if opts.resolved.verbose: log("parsing...")
    p = parse_program(p)
    if opts.resolved.verbose: log("processing...")
    run_chain(p)
    if opts.resolved.verbose: log("dumping output...")

    # Write to stdout when the output path is '-', otherwise open the file.
    if opts.resolved.output == '-':
        out = sys.stdout
    else:
        out = open(opts.resolved.output, 'w')  # file() was Python 2 only
    p.accept(PrintVisitor(stream=out))
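Example #1 assumes a module-level opts object whose parse_args() call stores the parsed namespace on opts.resolved and exposes opts.inputs. A minimal sketch of such a module, purely an assumption since the original opts is not shown:

# opts.py -- hypothetical options holder matching Example #1's usage
import argparse

resolved = None  # populated by parse_args()
inputs = None

def parse_args():
    global resolved, inputs
    parser = argparse.ArgumentParser()
    parser.add_argument('inputs', nargs='+')
    parser.add_argument('-o', '--output', default='-')
    parser.add_argument('-v', '--verbose', action='store_true')
    resolved = parser.parse_args()
    inputs = resolved.inputs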
Example #2

def opt():
    args = [
        '--model', 'SimpleNet', '--dataset', 'CIFAR10', '--data_dir',
        '../data', '--num_classes_per_task', '1', '--num_tasks', '3',
        '--num_pretrain_classes', '3', '--num_pretrain_passes', '1',
        '--num_loops', '1'
    ]
    args = parse_args(args)
    return args
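Several of these examples call a project-specific parse_args() helper that is not shown. An argparse-based sketch that would accept the flags Example #2 passes (an assumption, not the original implementation):

import argparse

def parse_args(argv=None):
    # Hypothetical reconstruction of the flags used in Example #2.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str)
    parser.add_argument('--dataset', type=str)
    parser.add_argument('--data_dir', type=str)
    parser.add_argument('--num_classes_per_task', type=int)
    parser.add_argument('--num_tasks', type=int)
    parser.add_argument('--num_pretrain_classes', type=int)
    parser.add_argument('--num_pretrain_passes', type=int)
    parser.add_argument('--num_loops', type=int)
    return parser.parse_args(argv)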
Example #3
import os

import cv2
import torch
import matplotlib.pyplot as plt
from path import Path  # Path.files() below implies the path.py package, not pathlib


def caculate_from_txt():
    args = parse_args()

    txt_path = Path(os.path.dirname(__file__)) / args.txt_path
    dirs = readlines(txt_path)

    files_ls = [Path(dr).files() for dr in dirs]
    lines = []
    line = []
    for files in files_ls:
        for img_p in files:
            img = cv2.imread(img_p)
            # cv2.imread returns BGR, so convert with COLOR_BGR2GRAY (the
            # original used COLOR_RGB2GRAY, which swaps the channel weights).
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img = torch.tensor(img).cuda()
            img = normalize_image(img)
            img = img.reshape(1, 1, img.shape[0], img.shape[1])
            line.append(stat(img))

        # One 50-bin histogram of the per-image statistics for this directory.
        line = torch.tensor(line)
        line_hist = line.histc(bins=50, min=0, max=1)
        lines.append(line_hist.numpy())
        line = []
    print('ok')

    plt.subplot(2, 2, 1)
    plt.plot(lines[0], 'r')
    plt.subplot(2, 2, 2)
    plt.plot(lines[1], 'g')
    plt.subplot(2, 2, 3)
    plt.plot(lines[2], 'b')

    # plt.subplot(2, 2, 4)
    # plt.plot(lines[3], 'y')
    plt.show()
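Example #3 leans on two helpers that are not shown, normalize_image and stat. The histc(bins=50, min=0, max=1) call suggests both map values into [0, 1]; a plausible sketch under that assumption:

def normalize_image(img):
    # Scale a uint8 grayscale tensor into [0, 1] (assumed behavior).
    return img.float() / 255.0

def stat(img):
    # One scalar per image; the mean stays in [0, 1], matching the
    # histc(bins=50, min=0, max=1) range above (assumed behavior).
    return img.mean().item()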
Example #4
            torch.nn.utils.clip_grad_norm_(model.parameters(), opt.clip)  # better safe than sorry
            optimizer.step()

            # Log losses
            losses.update(loss.data.item(), labels.size(0))
            batch_time.update(time.time() - start)
            start = time.time()

        logger.info('==> Train:[{0}]\tTime:{batch_time.sum:.4f}\tData:{data_time.sum:.4f}\tLoss:{loss.avg:.4f}\t'
            .format(epoch, batch_time=batch_time, data_time=data_time, loss=losses))
        return model, optimizer


if __name__ == '__main__':
    # Parse arguments
    opt = parse_args()
    seed_everything(seed=opt.seed)

    # Setup logger
    console_logger = get_logger(folder=opt.log_dir + '/' + opt.exp_name + '/')

    # Handle fixed class orders. Note: the class-ordering code is hacky; adjust it
    # manually here to test different datasets.
    console_logger.debug("==> Loading dataset..")
    # iCARL test order, restricted to CIFAR100. For other class orders see
    # https://github.com/arthurdouillard/incremental_learning.pytorch/tree/master/options/data
    class_order = [87, 0, 52, 58, 44, 91, 68, 97, 51, 15, 94, 92, 10, 72, 49, 78, 61, 14, 8, 86, 84, 96, 18, 24, 32, 45, 88, 11, 4, 67, 69, 66, 77, 47, 79, 93, 29, 50, 57, 83, 17, 81, 41, 12, 37, 59, 25, 20, 80, 73, 1, 28, 6, 46, 62, 82, 53, 9, 31, 75, 38, 63, 33, 74, 27, 22, 36, 3, 16, 21, 60, 19, 70, 90, 89, 43, 5, 42, 65, 76, 40, 30, 23, 85, 2, 95, 56, 48, 71, 64, 98, 13, 99, 7, 34, 55, 54, 26, 35, 39]
    if opt.dataset not in ('CIFAR100', 'ImageNet100'):
        class_order = None

    # Handle 'path does not exist' errors
    if not os.path.isdir(opt.log_dir + '/' + opt.exp_name):
        os.mkdir(opt.log_dir + '/' + opt.exp_name)
    if opt.old_exp_name != 'test' and not os.path.isdir(opt.log_dir + '/' + opt.old_exp_name):
        os.mkdir(opt.log_dir + '/' + opt.old_exp_name)
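Example #4 calls seed_everything(seed=...) to make runs reproducible. A common implementation, assumed here since the original is not shown, seeds Python, NumPy, and PyTorch together:

import os
import random

import numpy as np
import torch

def seed_everything(seed=0):
    # Seed every RNG the training loop might touch.
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)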
Example #5

            if opt.tta:
                im_names, labels = eval_logits_tta(loader,
                                                   model,
                                                   device=device)
            else:
                im_names, labels = eval_logits(loader, model, device)
        im_labels = []
        # print(im_names)
        # Collect predicted logits for every augmented pass over each file.
        for name, label in zip(im_names, labels):
            if name in dict_:
                dict_[name].append(label)
            else:
                dict_[name] = [label]

    header = ['filename', 'type']
    utils.mkdir(opt.results_dir)
    result = opt.network + '-' + str(opt.layers) + '-' + str(
        opt.crop_size) + '_result.csv'
    filename = os.path.join(opt.results_dir, result)
    # newline='' keeps csv.writer from inserting blank rows on Windows.
    with open(filename, 'w', encoding='utf-8', newline='') as f:
        f_csv = csv.writer(f)
        f_csv.writerow(header)
        for key in dict_:
            # Sum logits over augmentations, argmax, +1 for 1-based class ids.
            v = np.argmax(np.sum(np.array(dict_[key]), axis=0)) + 1
            # v = list(np.sum(np.array(dict_[key]), axis=0))
            f_csv.writerow([key, v])


opt = opts.parse_args()
main(opt)
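Example #5 ensembles test-time-augmentation (TTA) runs by collecting logits per filename and voting with the argmax of their sum. The undefined eval_logits helper presumably returns parallel lists of filenames and logit vectors; a minimal sketch, assuming the loader yields (images, names) batches:

import torch

@torch.no_grad()
def eval_logits(loader, model, device):
    # Run the model over a loader and return (filenames, logit arrays).
    model.eval()
    im_names, labels = [], []
    for images, names in loader:
        logits = model(images.to(device))
        im_names.extend(names)
        labels.extend(logits.cpu().numpy())
    return im_names, labels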
Example #6

    print('Finished processing directory {}!'.format(day_dir))
    print('Cost {} minutes'.format((time.time() - st) / 60))


def main(args=None):
    source_dir = args.source_dir
    cur_dir_name = get_path_leaf(source_dir)
    # Raw strings keep the regex backslashes from being treated as escapes.
    if re.match(r'\d{4}-\d{2}-\d{2}', cur_dir_name):
        day_dir = source_dir
        workflow(day_dir, args)
    else:
        sub_dirs = os.listdir(source_dir)
        day_dirs = []
        for sub_dir in sub_dirs:
            if not re.match(r'\d{4}-\d{2}-\d{2}', sub_dir):
                print('Subdirectory {} under {} is not a date'.format(
                    sub_dir, source_dir))
            else:
                day_dirs.append(os.path.join(source_dir, sub_dir))
        for day_dir in day_dirs:
            workflow(day_dir, args)


if __name__ == '__main__':
    try:
        multiprocessing.set_start_method('spawn')
    except RuntimeError:
        pass
    main(parse_args())
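get_path_leaf in Example #6 is not shown; it evidently returns the last path component so the date pattern can be checked against it. A one-line sketch with that assumed behavior:

import os

def get_path_leaf(path):
    # Last component of a path, tolerating a trailing separator.
    return os.path.basename(os.path.normpath(path))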
Example #7

    writer.add_summary(summary, iteration)


def check_rootfolders(trainid):
    """Create log and model folders."""
    folders_util = [args.root_log, args.root_model, args.root_output]
    if not os.path.exists('./data/results'):
        os.makedirs('./data/results')
    for folder in folders_util:
        if not os.path.exists(os.path.join('./data/results', trainid, folder)):
            print('creating folder ' + folder)
            os.makedirs(os.path.join('./data/results', trainid, folder))


if __name__ == '__main__':

    args = parse_args()
    if args.batch_size != 1:
        print('The batch size must be 1 for now.')
        raise NotImplementedError
    check_rootfolders(args.train_id)
    # tensorboard writer; summary_w is None when TensorFlow is unavailable
    summary_w = tf and tf.summary.FileWriter(
        os.path.join('./data/results', args.train_id, args.root_log))
    print('Called with args:')
    print(args)

    np.random.seed(args.RNG_SEED)

    torch.backends.cudnn.enabled = False
    if torch.cuda.is_available() and not args.cuda:
        print("WARNING: You have a CUDA device, "
              "so you should probably run with --cuda")
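The summary_w = tf and tf.summary.FileWriter(...) guard in Example #7 only works if tf is bound to None when TensorFlow cannot be imported. The import side of that pattern presumably looks like this (assumed; it is not part of the snippet):

# Optional TensorFlow: fall back to None so the 'tf and ...' expression
# short-circuits instead of raising NameError.
try:
    import tensorflow as tf
except ImportError:
    tf = None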
Example #8
        frame_idx = int(random() * len(frames))
        # clamp so that the previous and the next frame can both be read
        if frame_idx < 2:
            frame_idx = 2
        if frame_idx > len(frames) - 2:
            frame_idx = len(frames) - 2

        s += frames[frame_idx].stem
        if frames[frame_idx].stem == '0000001':
            print('cao!')
        if s not in return_list:
            return_list.append(s)
            i += 1
    length = len(return_list)

    # Convert the split fractions into cumulative index boundaries.
    train_ = int(train_ * length)
    val_ = int(val_ * length) + train_
    test_ = int(test_ * length) + val_

    writelines(return_list[:train_], train_txt_p)
    writelines(return_list[train_:val_], val_txt_p)
    writelines(return_list[val_:], test_txt_p)


if __name__ == '__main__':
    options = parse_args()
    generate_mc(options)
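Examples #3 and #8 both rely on readlines/writelines helpers for plain text files. A minimal pair consistent with that usage (an assumption; the originals are not shown):

def readlines(path):
    # One stripped string per non-empty line.
    with open(path, 'r', encoding='utf-8') as f:
        return [ln.strip() for ln in f if ln.strip()]

def writelines(lines, path):
    # One entry per line.
    with open(path, 'w', encoding='utf-8') as f:
        f.write('\n'.join(str(ln) for ln in lines))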
Example #9

            for epoch in range(1, opt.num_pretrain_passes + 1):
                trainer.train(loader=vd.pretrain_loader, model=model, optimizer=optimizer, epoch=epoch)
                acc = trainer.test(loader=vd.pretest_loader, model=model, mask=vd.pretrain_mask, epoch_or_phase=epoch)
            logger.info(f'==> Pretraining completed! Acc: [{acc:.3f}]')
            save_pretrained_model(opt, model)

    if opt.num_tasks > 0:
        # TODO: use another optimizer?
        # Class-Incremental training
        # We start with the pretrain mask because in testing we want the pretrained classes included
        logger.info('==> Starting Class-Incremental training')
        mask = vd.pretrain_mask.clone() if opt.num_pretrain_classes > 0 else torch.zeros(vd.n_classes_in_whole_dataset)
        dataloaders = vd.get_ci_dataloaders()
        cl_accuracy_meter = AverageMeter()
        for phase, (trainloader, testloader, class_list, phase_mask) in enumerate(dataloaders, start=1):
            trainer.train(loader=trainloader, model=model, optimizer=optimizer, phase=phase)

            # accumulate masks, because we want to test on all seen classes
            mask += phase_mask

            # this is the accuracy over all classes seen so far
            acc = trainer.test(loader=testloader, model=model, mask=mask, epoch_or_phase=phase)
            cl_accuracy_meter.update(acc)

        logger.info(f'==> CL training completed! AverageAcc: [{cl_accuracy_meter.avg:.3f}]')


if __name__ == '__main__':
    cmd_opts = parse_args()
    exp1(cmd_opts)
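AverageMeter, used here and implicitly in Example #4 (losses, batch_time), is the running-average helper popularized by the PyTorch ImageNet example; a typical definition, in case this project's version differs:

class AverageMeter:
    """Track a running sum and average of scalar values."""

    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count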