Code Example #1
File: SVHNtrain.py Project: BarcaD12/svhn-maml
def main(args):

    torch.manual_seed(222)
    torch.cuda.manual_seed_all(222)
    np.random.seed(222)

    print(args)

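    # layer spec: ('conv2d', [ch_out, ch_in, kernel_h, kernel_w, stride, padding]),
    # matching the layout noted in example #20 below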
    config = [('conv2d', [64, 1, 3, 3, 2, 0]), ('relu', [True]), ('bn', [64]),
              ('conv2d', [64, 64, 3, 3, 2, 0]), ('relu', [True]), ('bn', [64]),
              ('conv2d', [64, 64, 3, 3, 2, 0]), ('relu', [True]), ('bn', [64]),
              ('conv2d', [64, 64, 2, 2, 1, 0]), ('relu', [True]), ('bn', [64]),
              ('flatten', []), ('linear', [args.n_way, 256])]

    #device = torch.device('cuda')
    device = torch.device('cpu')

    maml = Meta(args, config).to(device)

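    # count the trainable parameters by summing the product of each tensor's shape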
    tmp = filter(lambda x: x.requires_grad, maml.parameters())
    num = sum(map(lambda x: np.prod(x.shape), tmp))
    print(maml)
    print('Total trainable tensors:', num)

    db_train = SvhnNShot(batchsz=args.task_num,
                         n_way=args.n_way,
                         k_shot=args.k_spt,
                         k_query=args.k_qry,
                         imgsz=args.imgsz)

    for step in range(args.epoch):

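        # sample one meta-batch: support set (spt) for inner updates, query set (qry) for the meta-objective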
        x_spt, y_spt, x_qry, y_qry = db_train.next()
        x_spt, y_spt, x_qry, y_qry = torch.from_numpy(x_spt).to(device), torch.from_numpy(y_spt).to(device), \
                                     torch.from_numpy(x_qry).to(device), torch.from_numpy(y_qry).to(device)

        # set training=True to update running_mean, running_variance, bn_weights, bn_bias
        accs = maml(x_spt, y_spt.long(), x_qry, y_qry.long())

        if step % 50 == 0:
            print('step:', step, '\ttraining acc:', accs)

        if step % 500 == 0:
            accs = []
            for _ in range(1000 // args.task_num):
                # test
                x_spt, y_spt, x_qry, y_qry = db_train.next('test')
                x_spt, y_spt, x_qry, y_qry = torch.from_numpy(x_spt).to(device), torch.from_numpy(y_spt).to(device), \
                                             torch.from_numpy(x_qry).to(device), torch.from_numpy(y_qry).to(device)

                # split to single task each time
                for x_spt_one, y_spt_one, x_qry_one, y_qry_one in zip(
                        x_spt, y_spt, x_qry, y_qry):
                    test_acc = maml.finetunning(x_spt_one, y_spt_one.long(),
                                                x_qry_one, y_qry_one.long())
                    accs.append(test_acc)

            # [b, update_step+1]
            accs = np.array(accs).mean(axis=0).astype(np.float16)
            print('Test acc:', accs)
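For orientation, a sketch of the imports the snippet above assumes; the module paths are guesses, only the call signatures come from the code itself:

import numpy as np
import torch

# hypothetical module paths; the real layout depends on the project
from meta import Meta              # maml(x_spt, y_spt, x_qry, y_qry) runs one meta-training step,
                                   # maml.finetunning(...) adapts to and evaluates a single task
from svhn_nshot import SvhnNShot   # episodic SVHN sampler with next() / next('test')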
Code Example #2
File: train.py Project: AnugunjNaman/Fixed-MAML
def main():
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    np.random.seed(1)
    print(args)

    config = [('conv2d', [args.num_filters, 1, 3, 3, 2, 1]), ('relu', [True]),
              ('bn', [args.num_filters]),
              ('conv2d', [args.num_filters, args.num_filters, 3, 3, 2, 1]),
              ('relu', [True]), ('bn', [args.num_filters]),
              ('conv2d', [args.num_filters, args.num_filters, 3, 3, 2, 1]),
              ('relu', [True]), ('bn', [args.num_filters]),
              ('conv2d', [args.num_filters, args.num_filters, 3, 3, 2, 1]),
              ('relu', [True]), ('bn', [args.num_filters]), ('flatten', []),
              ('linear', [args.n_way + 2, args.num_filters * 9])]

    device = torch.device('cuda')
    maml = Meta(args, config).to(device)

    tmp = filter(lambda x: x.requires_grad, maml.parameters())
    num = sum(map(lambda x: np.prod(x.shape), tmp))
    print(maml)
    print('Total trainable tensors:', num)

    # batchsz here means total sampled meta-task number
    if args.train == 'True':
        mini_train = LingualData('./data',
                                 mode='train',
                                 task_type=args.task_type,
                                 n_way=args.n_way,
                                 k_shot=args.k_spt_train,
                                 k_query=args.k_qry_train,
                                 k_unk_shot=args.k_spt_unk_train,
                                 k_unk_query=args.k_qry_unk_train,
                                 k_silence_shot=args.k_spt_silence_train,
                                 k_silence_query=args.k_qry_silence_train,
                                 batchsz=16000,
                                 resize=args.imgsz,
                                 unk_sil_spt=args.unk_sil_spt)

    exp_string = 'cls_{}.tskn_{}.spttrain_{}.qrytrain_{}.numstep{}.updatelr{}'.format(
        args.n_way, args.task_num, args.k_spt_train, args.k_qry_train,
        args.update_step, args.update_lr)
    model_path = args.logdir + '/' + exp_string
    model_file = None

    if args.train == 'True':
        if not os.path.exists(model_path):
            os.makedirs(model_path)
            print("logs directory ", args.logdir, " created!")
        writer = SummaryWriter(model_path)
        set_logger(os.path.join(args.logdir, 'train.log'))
        train(maml, mini_train, model_path, args.resume_itr, device, writer)
    else:
        if args.test_iter >= 0:
            model_file = model_path + '/' + 'model-' + str(
                args.test_iter) + '.pth'
            test(maml, model_file, device)
Code Example #3
File: convert.py Project: atremblay/video_conversion
def metaData(fileOrigin, dbPath):

    dirs = np.array(fileOrigin.split('/'))

    kindIndex = np.where((dirs == 'TV show') | (dirs == 'Movie'))[0]
    if len(kindIndex) != 1:
        raise Exception("Problem with path")

    tvshow, season, episode = getInfo(fileOrigin)

    kindIndex = kindIndex[0]
    metaD = {}
    metaD['videoKind'] = dirs[kindIndex]
    metaD['showName'] = dirs[kindIndex + 1]
    metaD['seasonNumber'] = season
    metaD['episodeNumber'] = episode

    if tvshow is not None and season is not None and episode is not None:
        meta = Meta(dbPath)
        soup = meta.get_meta(tvshow, season, episode)
        print("Episode Name and Description: {}".format(soup))
        if soup is None:
            metaD['episodeName'] = None
            metaD['episodeDescription'] = None
        else:
            episodeName, episodeDescription = soup
            metaD['episodeName'] = episodeName
            metaD['episodeDescription'] = episodeDescription
    else:
        metaD['episodeName'] = None
        metaD['episodeDescription'] = None

    return metaD
Code Example #4
def create_lmdb_meta_file(num_train_examples, num_val_examples, num_test_examples, path_to_lmdb_meta_file):
    print('Saving meta file to %s...' % path_to_lmdb_meta_file)
    meta = Meta()
    meta.num_train_examples = num_train_examples
    meta.num_val_examples = num_val_examples
    meta.num_test_examples = num_test_examples
    meta.save(path_to_lmdb_meta_file)
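Examples #4 and #5 (and the train/eval mains below) treat Meta as a small serializable record of dataset sizes. A minimal sketch consistent with those call sites (Meta(**kwargs), attribute assignment, save(path), load(path)), not any project's actual implementation:

import json

class Meta(object):
    """Minimal JSON-backed metadata record (sketch, inferred from usage)."""

    def __init__(self, **kwargs):
        self.num_train_examples = None
        self.num_val_examples = None
        self.num_test_examples = None
        self.__dict__.update(kwargs)   # e.g. Meta(**dict_sample_number) in example #12

    def save(self, path):
        # persist all attributes as JSON (e.g. to meta.json)
        with open(path, 'w') as f:
            json.dump(self.__dict__, f)

    def load(self, path):
        # restore attributes written by save()
        with open(path, 'r') as f:
            self.__dict__.update(json.load(f))
        return self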
Code Example #5
def create_tfrecords_meta_file(num_train_examples, num_val_examples,
                               num_test_examples, path_to_tfrecords_meta_file):
    print('Saving meta file to %s...' % path_to_tfrecords_meta_file)
    meta = Meta()
    meta.num_train_examples = num_train_examples
    meta.num_val_examples = num_val_examples
    meta.num_test_examples = num_test_examples
    meta.save(path_to_tfrecords_meta_file)
Code Example #6
def main(_):
    path_to_train_tfrecords_file = os.path.join(FLAGS.data_dir,
                                                'train.tfrecords')
    path_to_val_tfrecords_file = os.path.join(FLAGS.data_dir, 'val.tfrecords')
    path_to_tfrecords_meta_file = os.path.join(FLAGS.data_dir, 'meta.json')
    path_to_train_log_dir = os.path.join(
        FLAGS.train_logdir,
        "ssim_{:.2f}-defend_{}-attacker_{}".format(FLAGS.ssim_weight,
                                                   FLAGS.defend_layer,
                                                   FLAGS.attacker_type))
    print("log path: {}".format(path_to_train_log_dir))
    path_to_restore_model_checkpoint_file = FLAGS.restore_checkpoint
    training_options = {
        'batch_size': FLAGS.batch_size,
        'learning_rate': FLAGS.learning_rate,
        'patience': FLAGS.patience,
        'decay_steps': FLAGS.decay_steps,
        'decay_rate': FLAGS.decay_rate
    }

    meta = Meta()
    meta.load(path_to_tfrecords_meta_file)

    _train(path_to_train_tfrecords_file, meta.num_train_examples,
           path_to_val_tfrecords_file, meta.num_val_examples,
           path_to_train_log_dir, path_to_restore_model_checkpoint_file,
           training_options)
Code Example #7
File: main.py Project: 63days/MAML
def main(args):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(device)
    meta = Meta(inner_lr=args.inner_lr, outer_lr=args.outer_lr)
    meta.to(device)
    if not args.test:
        train_ds = Sinusoid(k_shot=args.k_shot, q_query=15, num_tasks=1000000)
        train_dl = DataLoader(train_ds, batch_size=args.batch_size, shuffle=True, pin_memory=True)

        best_loss = float('inf')
        losses = []
        pbar = tqdm(range(args.epochs))
        for epoch in pbar:
            k_i, k_o, q_i, q_o = next(iter(train_dl))
            k_i, k_o, q_i, q_o = k_i.to(device), k_o.to(device), q_i.to(device), q_o.to(device)
            loss = meta(k_i, k_o, q_i, q_o)
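            # assumes meta() returns a Python float; if it returns a tensor, append loss.item() instead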
            losses.append(loss)
            pbar.set_description('| loss: {:.5f} |'.format(loss))

            if best_loss > loss:
                best_loss = loss
                meta.save_weights()

        plt.plot(losses)
        plt.savefig('loss_final.png', dpi=300)


    else:
        print('testing... k_shot:', args.k_shot)
        meta.test(k=args.k_shot)
Code Example #8
File: forms.py Project: juan0830/Mereces-tu-diploma
class registrarJugadorForm(Form):
    """docstring for registrarJugador"""
    Meta()
    correo = EmailField('Correo de usuario para registrar', [
        Email("Ingrese un correo."),
        DataRequired("No deje el espacio vacío.")
    ])
    passwd = PasswordField('Ingrese contraseña para nuevo usuario', [
        Length(2, 25, "La contraseña debe ser entre 4 y 25 caracteres."),
        DataRequired("No deje el espacio vacío")
    ])
    grade = SelectMultipleField('Grado',
                                choices=[('kinder', 'kinder'),
                                         ('first', 'primero'),
                                         ('second', 'segundo'),
                                         ('third', 'tercero'),
                                         ('fourth', 'cuarto'),
                                         ('fifth', 'quinto'),
                                         ('sixth', 'sexto'),
                                         ('seventh', 'septimo'),
                                         ('eigth', 'octavo'),
                                         ('ninth', 'noveno'),
                                         ('tenth', 'decimo'),
                                         ('eleventh', 'once')])
    crearUsuario = SubmitField('Crear Usuario')
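Aside: the bare Meta() call at the top of these form classes constructs an instance and immediately discards it. If the intent is to configure the form, WTForms expects a nested class instead; a sketch (the csrf option is just an example):

class registrarJugadorForm(Form):
    class Meta:       # WTForms reads form configuration from a nested Meta class
        csrf = False  # example setting; real options are application-specific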
Code Example #9
def main():
    torch.manual_seed(222)
    torch.cuda.manual_seed_all(222)
    np.random.seed(222)
    test_result = {}
    best_acc = 0.0

    maml = Meta(args, Param.config).to(Param.device)
    maml = torch.nn.DataParallel(maml)
    opt = optim.Adam(maml.parameters(), lr=args.meta_lr)
    #opt = optim.SGD(maml.parameters(), lr=args.meta_lr, momentum=0.9, weight_decay=args.weight_decay)  

    tmp = filter(lambda x: x.requires_grad, maml.parameters())
    num = sum(map(lambda x: np.prod(x.shape), tmp))
    print(maml)
    print('Total trainable tensors:', num)

    trainset = MiniImagenet(Param.root, mode='train', n_way=args.n_way, k_shot=args.k_spt, k_query=args.k_qry, resize=args.imgsz)
    testset = MiniImagenet(Param.root, mode='test', n_way=args.n_way, k_shot=args.k_spt, k_query=args.k_qry, resize=args.imgsz)
    trainloader = DataLoader(trainset, batch_size=args.task_num, shuffle=True, num_workers=4, drop_last=True)
    testloader = DataLoader(testset, batch_size=4, shuffle=True, num_workers=4, drop_last=True)
    train_data = inf_get(trainloader)
    test_data = inf_get(testloader)

    for epoch in range(args.epoch):
        support_x, support_y, meta_x, meta_y = train_data.__next__()
        support_x, support_y, meta_x, meta_y = support_x.to(Param.device), support_y.to(Param.device), meta_x.to(Param.device), meta_y.to(Param.device)
        meta_loss = maml(support_x, support_y, meta_x, meta_y).mean()
        opt.zero_grad()
        meta_loss.backward()
        torch.nn.utils.clip_grad_value_(maml.parameters(), clip_value = 10.0)
        opt.step()
        plot.plot('meta_loss', meta_loss.item())

        if(epoch % 2000 == 999):
            ans = None
            maml_clone = deepcopy(maml)
            for _ in range(600):
                support_x, support_y, qx, qy = test_data.__next__()
                support_x, support_y, qx, qy = support_x.to(Param.device), support_y.to(Param.device), qx.to(Param.device), qy.to(Param.device)
                temp = maml_clone(support_x, support_y, qx, qy, meta_train = False)
                if(ans is None):
                    ans = temp
                else:
                    ans = torch.cat([ans, temp], dim = 0)
            ans = ans.mean(dim = 0).tolist()
            test_result[epoch] = ans
            if (ans[-1] > best_acc):
                best_acc = ans[-1]
                torch.save(maml.state_dict(), Param.out_path + 'net_'+ str(epoch) + '_' + str(best_acc) + '.pkl') 
            del maml_clone
            print(str(epoch) + ': '+str(ans))
            with open(Param.out_path+'test.json','w') as f:
                json.dump(test_result,f)
        if (epoch < 5) or (epoch % 100 == 99):
            plot.flush()
        plot.tick()
Code Example #10
File: loader.py Project: multun/yiptables
 def __init__(self, stream):
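     # mirrors PyYAML's Loader, which mixes Reader, Scanner, Parser, Composer,
     # a Constructor (here YipCons) and Resolver into one class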
     self.meta = Meta(stream, 1, 1)
     self._root = os.path.split(stream.name)[0]
     Reader.__init__(self, stream)
     Scanner.__init__(self)
     Parser.__init__(self)
     Composer.__init__(self)
     YipCons.__init__(self)
     Resolver.__init__(self)
Code Example #11
def main():
    torch.manual_seed(222)
    torch.cuda.manual_seed_all(222)
    np.random.seed(222)
    test_result = {}
    best_acc = 0.0

    maml = Meta(args, Param.config).to(Param.device)
    if n_gpus>1:
        maml = torch.nn.DataParallel(maml)
    state_dict = torch.load(Param.out_path+args.ckpt)
    print(state_dict.keys())
    pretrained_dict = OrderedDict()
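    # keys saved from a DataParallel model carry a 'module.' prefix (7 chars); strip it when loading on a single GPU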
    for k in state_dict.keys():
        if n_gpus == 1:
            pretrained_dict[k[7:]] = deepcopy(state_dict[k])
        else:
            pretrained_dict[k] = deepcopy(state_dict[k])
    maml.load_state_dict(pretrained_dict)
    print("Load from ckpt:", Param.out_path+args.ckpt)
    
    #opt = optim.Adam(maml.parameters(), lr=args.meta_lr)
    #opt = optim.SGD(maml.parameters(), lr=args.meta_lr, momentum=0.9, weight_decay=args.weight_decay)  

    tmp = filter(lambda x: x.requires_grad, maml.parameters())
    num = sum(map(lambda x: np.prod(x.shape), tmp))
    print(maml)
    print('Total trainable tensors:', num)

    #trainset = MiniImagenet(Param.root, mode='train', n_way=args.n_way, k_shot=args.k_spt, k_query=args.k_qry, resize=args.imgsz)
    #valset = MiniImagenet(Param.root, mode='val', n_way=args.n_way, k_shot=args.k_spt, k_query=args.k_qry, resize=args.imgsz)
    testset = MiniImagenet(Param.root, mode='test', n_way=args.n_way, k_shot=args.k_spt, k_query=args.k_qry, resize=args.imgsz)
    #trainloader = DataLoader(trainset, batch_size=args.task_num, shuffle=True, num_workers=4, drop_last=True)
    #valloader = DataLoader(valset, batch_size=4, shuffle=True, num_workers=4, drop_last=True)
    testloader = DataLoader(testset, batch_size=4, shuffle=True, num_workers=4, drop_last=True)
    #train_data = inf_get(trainloader)
    #val_data = inf_get(valloader)
    test_data = inf_get(testloader)
    
    """Test for 600 epochs (each has 4 tasks)"""
    ans = None
    maml_clone = deepcopy(maml)
    for itr in range(600): # 600x4 test tasks
        support_x, support_y, qx, qy = test_data.__next__()
        support_x, support_y, qx, qy = support_x.to(Param.device), support_y.to(Param.device), qx.to(Param.device), qy.to(Param.device)
        temp = maml_clone(support_x, support_y, qx, qy, meta_train = False)
        if(ans is None):
            ans = temp
        else:
            ans = torch.cat([ans, temp], dim = 0)
        if itr%100==0:
            print(itr,ans.mean(dim = 0).tolist())
    ans = ans.mean(dim = 0).tolist()
    print('Acc: '+str(ans))
    with open(Param.out_path+'test.json','w') as f:
        json.dump(ans,f)
Code Example #12
 def create_tfrecords_meta_file(self,
                                dict_sample_number,
                                meta_name='batches_meta.json'):
     print('Saving meta file to %s...' % self.input_dir)
     meta = Meta(**dict_sample_number)
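     # 'batches.meta' is the standard CIFAR-10 metadata pickle; its 'label_names' entry lists the ten class names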
     with open(self.input_dir + '/cifar-10-batches-py/batches.meta',
               'rb') as f:
         content = pickle.load(f, encoding='latin1')
         meta.categories = content['label_names']
     meta.save(self.input_dir + '/' + meta_name)
Code Example #13
File: forms.py Project: juan0830/Mereces-tu-diploma
class registrarAdminForm(Form):
    Meta()
    correo = EmailField('Correo de nuevo admin', [
        Email("Ingrese un correo."),
        DataRequired("No deje el espacio vacío.")
    ])
    passwd = PasswordField('Ingrese contraseña para nuevo admin', [
        Length(2, 25, "La contraseña debe ser entre 4 y 25 caracteres."),
        DataRequired("No deje el espacio vacío")
    ])
    crearAdmin = SubmitField('Crear Admin')
Code Example #14
def main(_):
    path_to_test_tfrecords_file = os.path.join(FLAGS.data_dir, 'generated.tfrecords')
    path_to_tfrecords_meta_file = os.path.join(FLAGS.data_dir, 'meta.json')
    path_to_checkpoint_dir = FLAGS.checkpoint_dir
    save_file = 'result_generated.txt'

    path_to_test_eval_log_dir = os.path.join(FLAGS.eval_logdir, 'test')

    meta = Meta()
    meta.load(path_to_tfrecords_meta_file)

    _eval(path_to_checkpoint_dir, path_to_test_tfrecords_file, meta.num_test_examples, path_to_test_eval_log_dir, save_file)
Code Example #15
File: forms.py Project: juan0830/Mereces-tu-diploma
class loginAdminForm(Form):
    """El form para el login de los admins"""
    Meta()
    correo = EmailField('Correo de usuario', [
        Email("Ingrese un correo."),
        DataRequired("No deje el espacio vacío.")
    ])
    passwd = PasswordField('Ingrese contraseña', [
        Length(2, 25, "La contraseña es entre 2 y 25 caracteres."),
        DataRequired("No deje el espacio vacío")
    ])
    signIn = SubmitField('Sign In')
Code Example #16
File: geoMain.py Project: qhemu/MetaGeo
def main():
    """
    preprocess_data() :     load data from dataset and precess data into numpy format
    process_data() :        port the data to pyTorch and convert to cuda
    U_train, U_dev, U_test, classLatMedian, classLonMedian, userLocation : only use when Valid and Test
    """
    data = preprocess_data(args)
    data = process_data(data, args, args.normalization, args.usecuda)

    (adj, features, labels, idx_train, idx_val, idx_test, U_train, U_dev, U_test,
     classLatMedian, classLonMedian, userLocation) = data
    # exit()
    model_file = "./result_cmu_desce/{}way{}shot{}query-update_lr:{}-weight_decay:{}.pkl".format(
        args.n_way, args.k_spt, args.k_qry, args.update_lr, args.weight_decay)

    device = torch.device('cuda')
    # maml = Meta(args, config).to(device)
    maml = Meta(
        args,
        features.shape[1],
        labels.max().item() + 1,
        classLatMedian,
        classLonMedian).to(device)
    if args.model == "SGC":
        feature_dump_file = os.path.join(args.dir, 'feature_dump.pkl')
        # if os.path.exists(feature_dump_file):
        # 	print("load features")
        # 	features = load_obj(feature_dump_file)
        # else:
        features = sgc_precompute(args, features, adj, args.degree)

        print(args.dataset)
        if args.splt:
            trainset, valset = dealdata1(args, labels)
        else:
            trainset, valset = dealdata(args, labels)

        users = U_train + U_dev + U_test
        class_acc, meanDis, MedianDis, accAT161 = train_regression(maml, labels, features, users, trainset, valset,
                                                                   classLatMedian, classLonMedian, userLocation,
                                                                   args.epochs, args.patience,
                                                                   model_file)
    # load model from file and test the model
    timeStr = time.strftime("%Y-%m-%d %H:%M:%S\t", time.localtime(time.time()))
    argsStr = "-dir:{}\t-{}way{}shot{}query\t-update_lr{}\t-decay:{}".format(
        args.dir, args.n_way, args.k_spt, args.k_qry, args.update_lr, args.weight_decay)
    resultStr = "Test:\tclassification_acc:{}\t\tMean:{}\t\tMedian:{}\t\tAcc@161:{}".format(class_acc, meanDis,
                                                                                            MedianDis, accAT161)
    content = "\n" + timeStr + "\n" + argsStr + "\n" + resultStr + "\n"
    with open('./result_cmu_desce/result.txt', 'a') as f:
        f.write(content)
Code Example #17
File: forms.py Project: juan0830/Mereces-tu-diploma
class loginForm(Form):
    """El form para el login de los usuarios"""
    Meta()
    correo = EmailField('Correo de usuario', [
        Email("Ingrese un correo."),
        DataRequired("No deje el espacio vacío.")
    ])
    passwd = PasswordField('Ingrese contraseña', [
        Length(2, 25, "La contraseña es entre 4 y 25 caracteres."),
        DataRequired("No deje el espacio vacío")
    ])
    remember_me = BooleanField('remember me', default=False)
    signIn = SubmitField('Sign In')
Code Example #18
 def __init__(self, base, trace):
     """
     Arguments:
         base:           Base address of the allocation
         trace:          Full trace dictionary for the trace
     """
     self.base = base
     self.instruction = trace['IP']
     self.type = trace['Type']
     self.offset = trace['Address']['Value'] - base
     self.valueMeta = Meta(trace['Value'])
     self.size = self.valueMeta.Size
     self.value = self.valueMeta.Value
Code Example #19
def main():

    torch.manual_seed(222)
    torch.cuda.manual_seed_all(222)
    np.random.seed(222)
    print(args)
    config = [('conv2d', [32, 3, 3, 3, 1, 0]), ('relu', [True]), ('bn', [32]),
              ('max_pool2d', [2, 2, 0]), ('conv2d', [32, 32, 3, 3, 1, 0]),
              ('relu', [True]), ('bn', [32]), ('max_pool2d', [2, 2, 0]),
              ('conv2d', [32, 32, 3, 3, 1, 0]), ('relu', [True]), ('bn', [32]),
              ('max_pool2d', [2, 2, 0]), ('conv2d', [32, 32, 3, 3, 1, 0]),
              ('relu', [True]), ('bn', [32]), ('max_pool2d', [2, 1, 0]),
              ('flatten', []), ('linear', [args.n_way, 32 * 5 * 5])]

    device = torch.device('cuda')
    maml = Meta(args, config).to(device)

    ckpt_dir = "./checkpoint_miniimage.pth"
    print("Load trained model")
    ckpt = torch.load(ckpt_dir)
    maml.load_state_dict(ckpt['model'])

    mini_test = MiniImagenet("F:\\ACV_project\\MAML-Pytorch\\miniimagenet\\",
                             mode='test',
                             n_way=args.n_way,
                             k_shot=args.k_spt,
                             k_query=args.k_qry,
                             batchsz=1,
                             resize=args.imgsz)

    db_test = DataLoader(mini_test,
                         1,
                         shuffle=True,
                         num_workers=1,
                         pin_memory=True)
    accs_all_test = []
    #count = 0
    #print("Test_loader",db_test)

    for x_spt, y_spt, x_qry, y_qry in db_test:

        x_spt, y_spt, x_qry, y_qry = x_spt.squeeze(0).to(device), y_spt.squeeze(0).to(device), \
                                     x_qry.squeeze(0).to(device), y_qry.squeeze(0).to(device)

        accs = maml.finetunning(x_spt, y_spt, x_qry, y_qry)
        accs_all_test.append(accs)

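        # note: this averaging stays inside the loop, so a running mean is printed after every test task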
        # [b, update_step+1]
        accs = np.array(accs_all_test).mean(axis=0).astype(np.float16)
        print('Test acc:', accs)
Code Example #20
def main():
    ck = util.checkpoint(args)
    seed = args.seed
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    ck.write_log(str(args))
    # t = str(int(time.time()))
    # t = args.save_name
    # os.mkdir('./{}'.format(t))
    # (ch_out, ch_in, k, k, stride, padding)
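    # note: '+1' below is a project-specific op name (not a standard layer); presumably an elementwise residual add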
    config = [('conv2d', [32, 16, 3, 3, 1, 1]), ('relu', [True]),
              ('conv2d', [32, 32, 3, 3, 1, 1]), ('relu', [True]),
              ('conv2d', [32, 32, 3, 3, 1, 1]), ('relu', [True]),
              ('conv2d', [32, 32, 3, 3, 1, 1]), ('relu', [True]),
              ('+1', [True]), ('conv2d', [3, 32, 3, 3, 1, 1])]

    device = torch.device('cuda')
    maml = Meta(args, config).to(device)

    params = torch.load(
        r'/flush5/sho092/Robust_learning/experiment/'
        r'2020-07-14-16:58:35_k0_metalr0.001_updatelr0.01_batchsz100000_updateStep7/'
        r'model/model_200.pt')

    DL_MSI = dl.StereoMSIDatasetLoader(args)
    dv = DL_MSI.valid_loader
    maml.net.load_state_dict(params, strict=False)
    maml.net.eval()
    for idx, (valid_ms, valid_rgb) in enumerate(dv):
        # print('idx', idx)
        valid_ms, valid_rgb = prepare([valid_ms, valid_rgb])
        sr_rgb = maml.net(valid_ms)
        print(sr_rgb.max(), sr_rgb.min())
        sr_rgb = torch.clamp(sr_rgb, 0, 1)

        imsave(
            '../experiment/{}.png'.format(idx),
            np.uint8(sr_rgb.cpu().squeeze().permute(1, 2, 0).detach().numpy() *
                     255))
Code Example #21
def main(_):
    path_to_train_tfrecords_file = os.path.join(FLAGS.data_dir,
                                                'train.tfrecords')
    path_to_val_tfrecords_file = os.path.join(FLAGS.data_dir, 'val.tfrecords')
    path_to_tfrecords_meta_file = os.path.join(FLAGS.data_dir,
                                               'tfrecords_meta.json')
    path_to_train_log_dir = FLAGS.train_logdir
    path_to_restore_checkpoint_file = FLAGS.restore_checkpoint

    meta = Meta()
    meta.load(path_to_tfrecords_meta_file)

    _train(path_to_train_tfrecords_file, path_to_val_tfrecords_file,
           meta.num_val_examples, path_to_train_log_dir,
           path_to_restore_checkpoint_file)
Code Example #22
def main():
    model = Learner(model_config)
    model = load_model(model)
    maml = Meta(exp_config, model).to(device)
    print_trainable_params(maml)

    train_task_dataloader = getTaskDataLoader(exp_config['train_path'],
                                              batch_size=10)
    valid_task_dataloader = getTaskDataLoader(exp_config['valid_path'],
                                              batch_size=1)

    for epoch in range(exp_config['epoch']):
        print("---------------- epoch %d ----------------" % epoch)
        train_epoch(maml, train_task_dataloader)
        valid_epoch(maml, valid_task_dataloader)

        save_model(model)
Code Example #23
def TracesForHeapAllocation(BaseAddresses, Size, Frames):
    """
    Arguments:
        BaseAddresses: Iterable which contains allocation base addresses
        Size:          Allocation size to match
        Frames:        Allocation stack trace to match
    Return:
        Generator to iterate over traces with heap interactions
        where the heap allocation has the specified size and
        allocation trace.
    """
    #
    # For each trace that reads data *from* our allocation, or writes
    # *to* our location, we can discern three things:
    #
    # - The offset within the allocation
    # - The size of the field at that offset
    # - Some information on the data being written to that field
    #
    for Base in BaseAddresses:
        print "Base address %#x...\n" % Base

        for Trace in query.TracesWhichInteractWithMemory(
                trace_types=['Read', 'Write'],
                value_types=['Address'],
                low=Base,
                high=Base + Size):
            #
            # Skip all traces which don't match for the allocation size,
            # or the stack frames.
            #
            Heap = Meta(Trace['Address']).Heap
            if (not Heap) or (Heap.Frames not in Frames) or (Heap.Size != Size):
                continue

            #
            # Skip all traces that put us in memset/memcpy,
            # as these will generate false fields and field alignments
            #
            InsName = name.From(Trace['IP'])
            if 'memset' in InsName or 'memcpy' in InsName:
                # print "Mem(cpy|set)!"
                continue
            # print "!!! %s" % Trace
            yield Trace
Code Example #24
File: eval.py Project: gjmulder/meter-pop
def main(_):
    path_to_train_tfrecords_file = os.path.join(FLAGS.data_dir, 'train.tfrecords')
    path_to_val_tfrecords_file = os.path.join(FLAGS.data_dir, 'val.tfrecords')
    path_to_test_tfrecords_file = os.path.join(FLAGS.data_dir, 'test.tfrecords')
    path_to_tfrecords_meta_file = os.path.join(FLAGS.data_dir, 'meta.json')
    path_to_checkpoint_dir = FLAGS.checkpoint_dir

    path_to_train_eval_log_dir = os.path.join(FLAGS.eval_logdir, 'train')
    path_to_val_eval_log_dir = os.path.join(FLAGS.eval_logdir, 'val')
    path_to_test_eval_log_dir = os.path.join(FLAGS.eval_logdir, 'test')

    meta = Meta()
    meta.load(path_to_tfrecords_meta_file)

    _eval(path_to_checkpoint_dir, path_to_train_tfrecords_file, meta.num_train_examples, path_to_train_eval_log_dir)
    _eval(path_to_checkpoint_dir, path_to_val_tfrecords_file, meta.num_val_examples, path_to_val_eval_log_dir)
    _eval(path_to_checkpoint_dir, path_to_test_tfrecords_file, meta.num_test_examples, path_to_test_eval_log_dir)
Code Example #25
def main_eval(_):

    parser = argparse.ArgumentParser(
        description="Evaluation Routine for SVHNClassifier")
    parser.add_argument("--data_dir",
                        required=True,
                        help="Directory to read TFRecords files")
    parser.add_argument("--path_to_checkpoint_dir",
                        required=True,
                        help="Directory to read checkpoint files")
    parser.add_argument("--eval_logdir",
                        required=True,
                        help="Directory to write evaluation logs")
    parser.add_argument("--path_to_train_tfrecords_file",
                        required=True,
                        help="Tfrecords file in train directory")
    parser.add_argument("--path_to_val_tfrecords_file",
                        required=True,
                        help="Tfrecords file in val directory")
    parser.add_argument("--path_to_test_tfrecords_file",
                        required=True,
                        help="Tfrecords file in test directory")
    parser.add_argument("--path_to_tfrecords_meta_file",
                        required=True,
                        help="Meta file in directory")
    parser.add_argument("--path_to_train_eval_log_dir",
                        required=True,
                        help="Training and evaluating log directory")
    parser.add_argument("--path_to_val_eval_log_dir",
                        required=True,
                        help="Validating and evaluating log directory")
    parser.add_argument("--path_to_test_eval_log_dir",
                        required=True,
                        help="Testing and evaluating log directory")
    args = parser.parse_args()

    meta = Meta()
    meta.load(args.path_to_tfrecords_meta_file)

    _eval(args.path_to_checkpoint_dir, args.path_to_train_tfrecords_file,
          meta.num_train_examples, args.path_to_train_eval_log_dir)
    _eval(args.path_to_checkpoint_dir, args.path_to_val_tfrecords_file,
          meta.num_val_examples, args.path_to_val_eval_log_dir)
    _eval(args.path_to_checkpoint_dir, args.path_to_test_tfrecords_file,
          meta.num_test_examples, args.path_to_test_eval_log_dir)
Code Example #26
    def getMeta(self, value):
        keysOnly = False
        results = self.request("GetMeta", value, keysOnly)

        keys = results.pop(0)
        pages = dict()

        for result in results:
            page = result.pop(0)
            meta = Meta()

            for key, values in zip(keys, result):
                if not values:
                    continue
                meta[key].update(values)

            pages[page] = meta

        return pages
Code Example #27
def main():
    torch.manual_seed(121)
    torch.cuda.manual_seed_all(121)
    np.random.seed(121)

    nshot = SinwaveNShot(all_numbers_class=2000,
                         batch_size=20,
                         n_way=5,
                         k_shot=5,
                         k_query=15,
                         root='data')
    maml = Meta(hid_dim=64, meta_lr=1e-3, update_lr=0.004)

    for step in range(10000):
        x_spt, y_spt, x_qry, y_qry, param_spt, param_qry = nshot.next('train')
        x_spt, y_spt, x_qry, y_qry = torch.from_numpy(x_spt), torch.from_numpy(
            y_spt), torch.from_numpy(x_qry), torch.from_numpy(y_qry)

        loss = maml(x_spt, y_spt, x_qry, y_qry)
        if step % 20 == 0:
            print('step:', step, '\ttraining loss:', loss)

        if step % 500 == 0:
            loss = []
            for _ in range(1000 // 20):
                # test
                x_spt, y_spt, x_qry, y_qry, param_spt, param_qry = nshot.next(
                    'test')
                x_spt, y_spt, x_qry, y_qry = torch.from_numpy(x_spt), torch.from_numpy(y_spt), \
                                             torch.from_numpy(x_qry), torch.from_numpy(y_qry)

                # split to single task each time
                for x_spt_one, y_spt_one, x_qry_one, y_qry_one, param_spt_one, param_qry_one in \
                        zip(x_spt, y_spt, x_qry, y_qry, param_spt, param_qry):
                    test_loss = maml.finetunning(x_spt_one, y_spt_one,
                                                 x_qry_one, y_qry_one,
                                                 param_spt_one, param_qry_one)
                    loss.append(test_loss)

            # [b, update_step+1]
            loss = np.array(loss).mean(axis=0).astype(np.float16)
            print('Test loss:', loss)
Code Example #28
def main(_):
    path_to_train_tfrecords_file = os.path.join(FLAGS.data_dir, 'train.tfrecords')
    path_to_val_tfrecords_file = os.path.join(FLAGS.data_dir, 'val.tfrecords')
    path_to_tfrecords_meta_file = os.path.join(FLAGS.data_dir, 'meta.json')
    path_to_train_log_dir = FLAGS.train_logdir
    path_to_restore_checkpoint_file = FLAGS.restore_checkpoint
    training_options = {
        'batch_size': FLAGS.batch_size,
        'learning_rate': FLAGS.learning_rate,
        'epoches': FLAGS.epoches,
        'decay_steps': FLAGS.decay_steps,
        'decay_rate': FLAGS.decay_rate
    }

    meta = Meta()
    meta.load(path_to_tfrecords_meta_file)

    _train(path_to_train_tfrecords_file, meta.num_train_examples,
           path_to_val_tfrecords_file, meta.num_val_examples,
           path_to_train_log_dir, path_to_restore_checkpoint_file,
           training_options)
Code Example #29
File: make_cpp_wrapper.py Project: braph/libxml2
    def parse_header_files(self):
        fh = NamedTemporaryFile('w', prefix='libxml-cpp', suffix='.h')

        for f in self.doc.header_files:
            fh.write('#include <libxml/%s.h>\n' % f)
        fh.flush()

        self.ast = parse_file(
            fh.name,
            use_cpp=True,
            cpp_path='gcc',
            cpp_args=[
                '-E',
                '-D__attribute__(x)=',
                '-I.',
                '-Ifake_libc_include',
                '-I/usr/include/libxml2',
                # '-nostdinc', '-undef', '-I/usr/include',
            ])

        self.meta = Meta(self.ast)
Code Example #30
File: layer.py Project: snegovick/map_editor
    def deserialize(self, data):
        from state import state
        self.name = data["name"]
        self.layer_type = data["layer_type"]
        self.adjacency_dct = {}
        for k, v in data["adjacency_dct"].iteritems():
            self.adjacency_dct[int(k)] = v

        for p in data["proxys"]:
            if self.layer_type == LayerType.meta:
                m = Meta("", state.get_grid_step())
                proxy = Proxy(sprite=m, state=state, data=p)
                m.name = str(proxy.id)
                m.update_text()
            else:
                proxy = Proxy(state=state, data=p)
            self.proxy_dct[proxy.id] = proxy

        #print "proxy_dct:", self.proxy_dct
        if self.proxy_dct:
            self.last_id = max(self.proxy_dct.keys()) + 1
        else:
            self.last_id = 0