Example #1
def ewc_process(epochs, importance, use_cuda=True, weight=None):
    model = MLP(hidden_size)
    if torch.cuda.is_available() and use_cuda:
        model.cuda()
    optimizer = optim.SGD(params=model.parameters(), lr=lr)

    loss, acc, ewc = {}, {}, {}
    for task in range(num_task):
        loss[task] = []
        acc[task] = []

        if task == 0:
            if weight:
                model.load_state_dict(weight)
            else:
                for _ in tqdm(range(epochs)):
                    loss[task].append(
                        normal_train(model, optimizer,
                                     train_loader[task]))  # noqa
                    acc[task].append(test(model, test_loader[task]))
        else:
            old_tasks = []
            for sub_task in range(task):
                old_tasks = old_tasks + train_loader[
                    sub_task].dataset.get_sample(sample_size)  # noqa
            old_tasks = random.sample(old_tasks, k=sample_size)
            for _ in tqdm(range(epochs)):
                loss[task].append(
                    ewc_train(model, optimizer, train_loader[task],
                              EWC(model, old_tasks), importance))  # noqa
                for sub_task in range(task + 1):
                    acc[sub_task].append(test(model, test_loader[sub_task]))

    return loss, acc
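Note that ewc_process reads several module-level names (MLP, hidden_size, lr, num_task, sample_size, train_loader, test_loader) defined elsewhere in its project. A hypothetical sketch of that setup, for orientation only (make_task_loader is an illustrative placeholder, not a real helper):

# Hypothetical globals assumed by ewc_process; not part of the original
# source. make_task_loader stands in for whatever builds the per-task
# DataLoaders (e.g. permuted-MNIST tasks).
hidden_size = 200
lr = 1e-3
num_task = 3
sample_size = 200
train_loader = {t: make_task_loader(t, train=True) for t in range(num_task)}
test_loader = {t: make_task_loader(t, train=False) for t in range(num_task)}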
Example #2
 def test(self):
     utils.test(self,
                __file__,
                Codec,
                Codec.serialize,
                process_args=TreeNode.from_root_array,
                check_result=self.check_result)
Example #3
def main():
    file = "checkpoint/Densenet1968-2_newtest2.pth"
    model = torch.load(file)["net"].module.cpu()
    print(model)
    flops, params = profile(model, (1, 3, 32, 32))

    flops, params = flops.item(), params.item()
    wideresnet_params = 36500000
    wideresnet_flops = 10490000000
    score_flops = flops / wideresnet_flops
    score_params = params / wideresnet_params
    score = score_flops + score_params
    print("AGAINST WIDERESNET")
    print("Flops: {}, Params: {}".format(flops, params))
    print("Score flops: {} Score Params: {}".format(score_flops, score_params))
    print("Final score: {}".format(score))

    #    mobilenet_params = 6900000
    #    mobilenet_flops = 1170000000
    #    score_flops = flops/mobilenet_flops
    #    score_params = params/mobilenet_params
    #    score = score_flops + score_params
    #    print("AGAINST MOBILENET")
    #    print("Flops: {}, Params: {}".format(flops,params))
    #    print("Score flops: {} Score Params: {}".format(score_flops,score_params))
    #    print("Final score: {}".format(score))

    model = torch.load(file)["net"].module
    clean_trainloader, trainloader, testloader = utils.load_data(32,
                                                                 cutout=True)
    utils.test(model, testloader, "cuda", "no")
Example #4
def main():
    parser = argparse.ArgumentParser(description='PyTorch CIFAR Training')
    parser.add_argument('--hkd', default=0., type=float, help='')
    parser.add_argument('--temp', default=0., type=float, help='')
    parser.add_argument('--gkd', default=0., type=float, help='')
    parser.add_argument('--p', default=1, type=int, help='')
    parser.add_argument('--k', default=128, type=float, help='')
    parser.add_argument('--pool3', action='store_true', help='')
    args = parser.parse_args()
    save = "GKD_28-10_teaches_28-1_{}_{}_{}_{}_{}_{}".format(args.hkd,args.temp,args.gkd,args.p,args.k,args.pool3)


    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    trainloader, testloader = load_data(128,is_cifar10=False)
    file = "checkpoint/WideResNet28-10.pth"
    teacher = torch.load(file)["net"].module
    teacher = teacher.to(device)
    net = models.preact_resnet.PreActWideResNetStart(depth=28,width=1,num_classes=100)
    net = net.to(device)
    if device == 'cuda':
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True
    optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    scheduler = MultiStepLR(optimizer, milestones=[60, 120, 160], gamma=0.2)
    for epoch in range(200):
        print('Epoch: %d' % epoch)
        train(net, trainloader, scheduler, device, optimizer,
              teacher=teacher, lambda_hkd=args.hkd, lambda_gkd=args.gkd,
              temp=args.temp, classes=100, power=args.p,
              pool3_only=args.pool3, k=args.k)
        test(net, testloader, device, save_name=save)
Example #5
def main():
    trainloader, testloader = utils.load_data(128)
    for filename in sorted(os.listdir('checkpoint')):
        print(filename)
        file = "checkpoint/{}".format(filename)
        model = torch.load(file)["net"].module
        utils.test(model, testloader, "cuda", "no", show="error")
Example #6
def main():
    torch.manual_seed(6717449005)

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    train_loader, test_loader = mnist_data()

    model = Lenet_300_100().to(device)
    optimizer = optim.Adam(model.parameters())
    criterion = my_loss(model, len(train_loader.dataset))

    train(model, device, train_loader, criterion, optimizer, n_epochs=400)

    model.set_flag('train', False)
    test(model, device, train_loader, criterion)

    model.set_flag('train', True)
    evaluate(model, device, train_loader, criterion, 10)

    criterion = my_loss(model, len(test_loader.dataset))

    model.set_flag('train', False)
    print('Mean')
    test(model, device, test_loader, criterion)

    model.set_flag('train', True)
    print('Stochastic')
    evaluate(model, device, test_loader, criterion, 10)
    save(model)
Example #7
def main():
    parser = argparse.ArgumentParser(description='PyTorch CIFAR Training')
    parser.add_argument('--depth', default=28, type=int, help='')
    parser.add_argument('--width', default=1, type=float, help='')
    parser.add_argument('--seed', default=0, type=int, help='')

    args = parser.parse_args()
    save = "WideResNet{}-{}_{}".format(args.depth, args.width, args.seed)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    trainloader, testloader = load_data(128)
    net = models.preact_resnet.PreActWideResNetStart(depth=args.depth,
                                                     width=args.width,
                                                     num_classes=10)
    net = net.to(device)
    if device == 'cuda':
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True
    optimizer = optim.SGD(net.parameters(),
                          lr=0.1,
                          momentum=0.9,
                          weight_decay=5e-4)
    scheduler = MultiStepLR(optimizer, milestones=[60, 120, 160], gamma=0.2)
    for epoch in range(200):
        print('Epoch: %d' % epoch)
        train(net, trainloader, scheduler, device, optimizer)
        test(net, testloader, device, save_name=save)
Example #8
 def test(self):
     utils.test(self,
                __file__,
                Solution,
                process_args=TreeNode.from_root_array,
                process_result=self.process_result,
                asserter=self.assertCountEqual)
Example #9
def test():
    """To test code in imported modules"""
    tableMaker.test()
    boardAnalyser.test()
    game.test()
    player.test()
    utils.test()
    ui.test()
Example #10
def train(ftrain, ftest, epochs, savemodel, saveloss, savetest):
    X, y = load2d(ftrain, test=False)
    net3 = create_network(epochs)
    net3.fit(X, y)
    sys.setrecursionlimit(1500000)
    with open(savemodel, 'wb') as f:
        pickle.dump(net3, f, -1)
    draw_loss_2(net3, saveloss)
    test(net3, ftest, savetest)
Example #11
def test_and_fuse(args, dense_folder, ply_folder):
    if args.no_test is not True:
        ut.test(dense_folder, args.ckpt_step, args.model_dir)
    ut.clear_old_points(dense_folder)
    ut.fuse(dense_folder, args.fusibile_path, args.prob_threshold,
            args.disp_threshold, args.num_consistent)
    ply_paths = ut.get_fusion_plys(dense_folder)
    urls = ut.handle_plys(ply_paths, dense_folder, ply_folder, args)
    print('Sketchfab url {}'.format(urls))
    return urls
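test_and_fuse expects an argparse-style namespace whose fields match the attributes read above; a hedged invocation with purely illustrative values (the real flags live in the project's CLI parser):

import argparse

# All values below are illustrative, not taken from the project.
args = argparse.Namespace(no_test=False, ckpt_step=150000,
                          model_dir='model/',
                          fusibile_path='fusibile/build/fusibile',
                          prob_threshold=0.8, disp_threshold=0.25,
                          num_consistent=3)
urls = test_and_fuse(args, 'dense/', 'plys/')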
Example #12
def fine_tune(fmodel, ftrain, epochs, ftest, savemodel, saveloss, savetest):
    X1, y1 = load2d(ftrain, test=False)
    listFrozens = []  #['conv1','conv2','conv3']
    newlayers = set_weights(fmodel, frozen=False, listLayers=listFrozens)
    net2 = build_model1(newlayers, epochs)
    net2.fit(X1, y1)

    sys.setrecursionlimit(1500000)
    with open(savemodel, 'wb') as f:
        pickle.dump(net2, f, -1)
    draw_loss_2(net2, saveloss)
    test(net2, ftest, savetest)
Example #13
def main():
    args = get_args()
    # Set the seed so results can be reproduced
    torch.manual_seed(SEED)
    # Check if CUDA is available, use it if so
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    model = get_model(args.arch).to(device)
    optimizer = optim.Adam(model.parameters(), lr=LR)

    train_history = TrainHistory(args.arch)

    if hasattr(model, "img_width"):
        transforms = get_transformation(model.img_width, model.img_height)
    else:
        transforms = get_transformation()

    # Set up image folder and loader for training and testing
    train_captcha_folder = datasets.ImageFolder(TRAIN_DIR,
                                                transform=transforms)
    train_loader = torch.utils.data.DataLoader(train_captcha_folder,
                                               batch_size=BATCH_SIZE,
                                               shuffle=True,
                                               num_workers=1)
    test_captcha_folder = datasets.ImageFolder(TEST_DIR, transform=transforms)
    test_loader = torch.utils.data.DataLoader(test_captcha_folder,
                                              batch_size=1000,
                                              shuffle=True,
                                              num_workers=1)

    print("Going to train for {} epochs".format(EPOCHS))
    for epoch in range(1, EPOCHS + 1):
        train(LOG_INTERVAL,
              model,
              device,
              train_loader,
              optimizer,
              epoch,
              get_target_from_indices,
              train_captcha_folder,
              args.model_file,
              train_history=train_history)
        test(model,
             device,
             test_loader,
             get_target_from_indices,
             test_captcha_folder,
             train_history=train_history)

        train_history.epoch_finish()
        train_history.save_history()
Example #14
def main():
    torch.manual_seed(6717449005)

    criterion = nn.CrossEntropyLoss()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_loader, test_loader = mnist_data()

    model = Lenet_300_100_ID_L(350, 350, 50).to(device)
    optimizer = optim.Adam(model.parameters())

    train(model, device, train_loader, criterion, optimizer)
    test(model, device, test_loader, criterion)
    save(model)
Example #15
def main():
    parser = argparse.ArgumentParser(description='PyTorch CIFAR Training')
    parser.add_argument('--teacher_depth', default=18, type=int, help='')
    parser.add_argument('--teacher_width', default=8, type=int, help='')
    parser.add_argument('--teacher_mult', default=1.0, type=float, help='')
    parser.add_argument('--depth', default=18, type=int, help='')
    parser.add_argument('--width', default=8, type=int, help='')
    parser.add_argument('--mult', default=1.0, type=float, help='')
    parser.add_argument('--hkd', default=0., type=float, help='')
    parser.add_argument('--temp', default=0., type=float, help='')
    parser.add_argument('--gkd', default=0., type=float, help='')
    parser.add_argument('--p', default=1, type=int, help='')
    parser.add_argument('--seed', default=0, type=int, help='')
    parser.add_argument('--k', default=128, type=int, help='')
    parser.add_argument('--pool3', action='store_true', help='')
    parser.add_argument('--intra_only', action='store_true', help='')
    parser.add_argument('--inter_only', action='store_true', help='')
    args = parser.parse_args()
    save = "FineTune{}-{}-{}_0.pth".format(args.teacher_depth,
                                           args.teacher_width,
                                           args.teacher_mult)

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    trainloader, testloader, clean_trainloader = load_data(128)
    file = "checkpoint/NearMobileNet{}-{}-{}_0.pth".format(
        args.teacher_depth, args.teacher_width, args.teacher_mult)
    teacher = torch.load(file)["net"].module
    teacher = teacher.to(device)
    parameters = list()
    if device == 'cuda':
        teacher = torch.nn.DataParallel(teacher)
        cudnn.benchmark = True


#    optimizer = optim.AdamW(teacher.parameters(), lr=0.00001, weight_decay=5e-4)
    optimizer = optim.SGD(teacher.parameters(),
                          lr=0.001,
                          momentum=0.9,
                          weight_decay=5e-4)
    scheduler = MultiStepLR(optimizer, milestones=[55, 120, 160], gamma=0.2)
    predicted_student, labels = test(teacher,
                                     testloader,
                                     device,
                                     save_name=save)
    for epoch in range(50):
        print('Epoch: %d' % epoch)
        train(teacher, clean_trainloader, scheduler, device, optimizer)
        predicted_student, labels = test(teacher,
                                         testloader,
                                         device,
                                         save_name=save)
Example #16
def main():
    global args
    net = UNet(3, 1)
    net.load(opt.ckpt_path)
    loss = Loss('soft_dice_loss')
    torch.cuda.set_device(0)
    net = net.cuda()
    loss = loss.cuda()

    if args.phase == 'train':
        # train
        dataset = NucleiDetector(opt, phase=args.phase)
        train_loader = DataLoader(dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=True,
                                  num_workers=opt.num_workers,
                                  pin_memory=opt.pin_memory)
        lr = opt.lr
        optimizer = torch.optim.Adam(net.parameters(),
                                     lr=lr,
                                     weight_decay=opt.weight_decay)
        previous_loss = None  # haven't run
        for epoch in range(opt.epoch + 1):
            now_loss = train(train_loader, net, loss, epoch, optimizer,
                             opt.model_save_freq, opt.model_save_path)
            if previous_loss is not None and now_loss > previous_loss:
                lr *= opt.lr_decay
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr
                save_lr(net.model_name, opt.lr_save_path, lr)
            previous_loss = now_loss
    elif args.phase == 'val':
        # val phase
        dataset = NucleiDetector(opt, phase='val')
        val_loader = DataLoader(dataset,
                                batch_size=opt.batch_size,
                                shuffle=True,
                                num_workers=opt.num_workers,
                                pin_memory=opt.pin_memory)
        val(val_loader, net, loss)
    else:
        # test phase
        dataset = NucleiDetector(opt, phase='test')
        test_loader = DataLoader(dataset,
                                 batch_size=1,
                                 shuffle=True,
                                 num_workers=opt.num_workers,
                                 pin_memory=opt.pin_memory)
        test(test_loader, net, opt)
Example #17
def standard_process(epochs, use_cuda=True, weight=True):
    model = classifier()  # TODO: fix params MLP(hidden_size)
    if torch.cuda.is_available() and use_cuda:
        model.cuda()

    # if torch.cuda.device_count() > 1:
    #     print("Let's use", torch.cuda.device_count(), "GPUs!")
    #     model = nn.DataParallel(model)

    # model.to(device)

    optimizer = optim.SGD(params=model.parameters(), lr=lr)

    loss, acc = {}, {}
    for task in range(num_task):
        loss[task] = []
        acc[task] = []
        for _ in tqdm(range(epochs)):
            loss[task].append(
                normal_train(model, optimizer, train_loader[task]))
            for sub_task in range(task + 1):
                acc[sub_task].append(test(model, test_loader[sub_task]))
        if task == 0 and weight:
            weight = model.state_dict()
    return loss, acc, weight
Example #18
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--EXP_NAME', type=str, default='tiramisu12')
    parser.add_argument('--EXP_DIR',
                        type=str,
                        default='/disk3/yangle/code/benchmark/tiramisu/')
    parser.add_argument('--CAMVID_PATH', type=str, default='data/CamVid/')
    parser.add_argument('--LEARNING_RATE', type=float, default=1e-4)
    parser.add_argument('--WEIGHT_DECAY', type=float, default=0.0001)
    args = parser.parse_args()

    normalize = transforms.Normalize(mean=camvid.mean, std=camvid.std)
    test_dset = camvid.CamVid(args.CAMVID_PATH,
                              'test',
                              joint_transform=None,
                              transform=transforms.Compose(
                                  [transforms.ToTensor(), normalize]))
    test_loader = torch.utils.data.DataLoader(test_dset,
                                              batch_size=2,
                                              shuffle=False)

    model = tiramisu.FCDenseNet103(n_classes=12)
    model = model.cuda()
    optimizer = optim.RMSprop(model.parameters(),
                              lr=args.LEARNING_RATE,
                              weight_decay=args.WEIGHT_DECAY)
    experiment = experiment.Experiment(args.EXP_NAME, args.EXP_DIR)
    experiment.resume(model, optimizer)

    criterion = nn.NLLLoss2d(weight=camvid.class_weight.cuda()).cuda()
    test_loss, test_error = utils.test(model, test_loader, criterion)
    print(test_loss)
    print(test_error)
Example #19
def test():
    """Test script for the Quantum Information Toolkit.
    """
    # Ville Bergholm 2009-2011

    import utils

    lmap.test()
    utils.test()
    state.test()
    gate.test()
    ho.test()
    invariant.test()
    markov.test()
    seq.test()
    print('All tests passed.')
Example #20
def main(test_file,
         vocab_file,
         embeddings_file,
         pretrained_file,
         max_length=50,
         gpu_index=0,
         batch_size=128):

    device = torch.device(
        "cuda:{}".format(gpu_index) if torch.cuda.is_available() else "cpu")
    print(20 * "=", " Preparing for testing ", 20 * "=")
    if platform == "linux" or platform == "linux2":
        checkpoint = torch.load(pretrained_file)
    else:
        checkpoint = torch.load(pretrained_file, map_location=device)
    # Retrieving model parameters from checkpoint.
    embeddings = load_embeddings(embeddings_file)
    print("\t* Loading test data...")
    test_data = LCQMC_Dataset(test_file, vocab_file, max_length)
    test_loader = DataLoader(test_data, shuffle=True, batch_size=batch_size)
    print("\t* Building model...")
    model = SiaGRU(embeddings, device=device).to(device)
    model.load_state_dict(checkpoint["model"])
    print(20 * "=", " Testing SiaGRU model on device: {} ".format(device),
          20 * "=")
    batch_time, total_time, accuracy, auc = test(model, test_loader)
    print(
        "\n-> Average batch processing time: {:.4f}s, total test time: {:.4f}s, accuracy: {:.4f}%, auc: {:.4f}\n"
        .format(batch_time, total_time, (accuracy * 100), auc))
Example #21
    def calculate(self):
        for variable in [
                self.attri, self.train, self.focus, self.dn[0], self.dn[1]
        ]:
            try:
                variable.get()
            except:
                variable.set(0)

        probabilities = test(
            self.attri.get() + self.train.get(),
            [self.train.get(), self.focus.get()],
            [self.dn[0].get(), self.dn[1].get()],
            verbose=False)

        self.plot.set_title('Success Distribution')
        self.plot.set_xlabel('Number of Successes')
        self.plot.set_ylabel('Likelihood')
        self.plot.bar(range(probabilities.shape[0]), probabilities)
        self.canvas.draw()
        self.plot.clear()
        self.plot.cla()

        self.succ_lik.set('{:2.2%}'.format(
            np.sum(probabilities[self.dn[1].get():])))
        self.succ_exp.set('{:2.3}'.format(
            np.matmul(range(probabilities.shape[0]), probabilities)))
Example #22
def fasttext_evaluation(
        model_testing_result: OutputDirectory(type='AnyDirectory'),
        trained_model_dir: InputDirectory(type='AnyDirectory') = None,
        test_data_dir: InputDirectory(type='AnyDirectory') = None,
        char2index_dir: InputDirectory(type='AnyDirectory') = None):
    print('=====================================================')
    print(f'trained_model_dir: {Path(trained_model_dir).resolve()}')
    print(f'test_data_dir: {Path(test_data_dir).resolve()}')
    print(f'char2index_dir: {Path(char2index_dir).resolve()}')

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    max_len_ = 38
    path = os.path.join(test_data_dir, 'test.txt')
    test_samples = load_dataset(file_path=path,
                                max_len=max_len_,
                                char2index_dir=char2index_dir)

    test_iter = DataIter(test_samples)

    path = os.path.join(trained_model_dir, 'BestModel')
    model = torch.load(f=path)

    path = os.path.join(model_testing_result, 'result.json')
    acc_ = test(model, test_iter, device)
    json.dump({"acc": acc_}, open(path, 'w'))
    print('\n============================================')
Example #23
def model_load_test(test_df,
                    target_dir,
                    test_prediction_dir,
                    test_prediction_name,
                    max_seq_len=64,
                    num_labels=2,
                    batch_size=32):
    bertmodel = DistilBertModel(requires_grad=False, num_labels=num_labels)
    tokenizer = bertmodel.tokenizer
    device = torch.device("cuda")
    print(20 * "=", " Preparing for testing ", 20 * "=")
    if platform == "linux" or platform == "linux2":
        checkpoint = torch.load(os.path.join(target_dir, "best.pth.tar"))
    else:
        checkpoint = torch.load(os.path.join(target_dir, "best.pth.tar"), map_location=device)
    print("\t* Loading test data...")
    test_data = DataPrecessForSentence(tokenizer, test_df, max_seq_len)
    test_loader = DataLoader(test_data, shuffle=False, batch_size=batch_size)

    # Retrieving model parameters from checkpoint.
    print("\t* Building model...")
    model = bertmodel.to(device)
    model.load_state_dict(checkpoint["model"])
    print(20 * "=", " Testing BERT model on device: {} ".format(device),
          20 * "=")
    batch_time, total_time, accuracy, predictions = test(model, test_loader)
    print(
        "\n-> Average batch processing time: {:.4f}s, total test time: "
        "{:.4f}s, accuracy: {:.4f}%\n".format(batch_time, total_time,
                                              (accuracy * 100)))
    test_prediction = pd.DataFrame({'prediction': predictions})
    if not os.path.exists(test_prediction_dir):
        os.makedirs(test_prediction_dir)
    test_prediction.to_csv(os.path.join(test_prediction_dir,
                                        test_prediction_name),
                           index=False)
Example #24
def main(test_file, vocab_file, embeddings_file, pretrained_file, max_length=50, gpu_index=0, batch_size=128):
    """
    Test the ESIM model with pretrained weights on some dataset.
    Args:
        test_file: The path to a file containing preprocessed NLI data.
        pretrained_file: The path to a checkpoint produced by the
            'train_model' script.
        vocab_size: The number of words in the vocabulary of the model
            being tested.
        embedding_dim: The size of the embeddings in the model.
        hidden_size: The size of the hidden layers in the model. Must match
            the size used during training. Defaults to 300.
        num_classes: The number of classes in the output of the model. Must
            match the value used during training. Defaults to 3.
        batch_size: The size of the batches used for testing. Defaults to 32.
    """
    device = torch.device("cuda:{}".format(gpu_index) if torch.cuda.is_available() else "cpu")
    print(20 * "=", " Preparing for testing ", 20 * "=")
    if platform == "linux" or platform == "linux2":
        checkpoint = torch.load(pretrained_file)
    else:
        checkpoint = torch.load(pretrained_file, map_location=device)
    # Retrieving model parameters from checkpoint.
    hidden_size = checkpoint["model"]["projection.0.weight"].size(0)
    num_classes = checkpoint["model"]["classification.6.weight"].size(0)
    embeddings = load_embeddings(embeddings_file)
    print("\t* Loading test data...")    
    test_data = LCQMC_Dataset(test_file, vocab_file, max_length)
    test_loader = DataLoader(test_data, shuffle=True, batch_size=batch_size)
    print("\t* Building model...")
    model = ESIM(hidden_size, embeddings=embeddings, num_classes=num_classes, device=device).to(device)
    model.load_state_dict(checkpoint["model"])
    print(20 * "=", " Testing ESIM model on device: {} ".format(device), 20 * "=")
    batch_time, total_time, accuracy, auc = test(model, test_loader)
    print("\n-> Average batch processing time: {:.4f}s, total test time: {:.4f}s, accuracy: {:.4f}%, auc: {:.4f}\n".format(batch_time, total_time, (accuracy*100), auc))
Example #25
def main():
    parser = argparse.ArgumentParser(description="Available Parameters:")
    parser.add_argument("--n_hidden_units", default=64, type=int)
    parser.add_argument("--n_hidden_layers", default=1, type=int)
    parser.add_argument("--train_epochs", default=100, type=int)
    parser.add_argument("--write_output", default=True, type=bool)
    args = parser.parse_args()

    torch.manual_seed(0)
    np.random.seed(0)

    profiles = pd.read_csv("../data/new_profiles_200t.csv")
    comments = pd.read_csv("../data/new_comments_200t.csv")

    comments = comments.drop_duplicates()
    profiles = preprocessing.categorical_to_numerical(profiles, col="category_1")
    all_users = set(profiles.profile_username.values)

    data = preprocessing.scale(profiles.drop(columns=["category_1", "profile_username"]).values)
    name_to_record = {name: record for name, record in zip(all_users, data)}

    input_dim, output_dim = data.shape[1], len(profiles.category_1.unique()) + 1
    user_to_label = {user: category for user, category in profiles[["profile_username", "category_1"]].values}

    K = 5
    skf = StratifiedKFold(n_splits=K)
    models_metrics, models_histories = defaultdict(dict), defaultdict(list)
    for kth_fold, (train_idx, test_idx) in enumerate(skf.split(profiles.profile_username.values, profiles.category_1.values), start=1):
        print("Starting {}th Fold".format(kth_fold))

        authors = profiles.profile_username.values
        username_to_index = utils.get_users_indices(authors)
        interactions = utils.get_interactions(comments, username_to_index)
        edge_index = utils.get_edge_index(interactions)
        
        x = utils.get_x(authors, name_to_record, input_dim=input_dim)
        y = utils.get_y(user_to_label, authors)

        train_mask = [True if i in train_idx else False for i in range(len(x))]
        test_mask = [True if i in test_idx else False for i in range(len(x))]
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        data = Data(x=x, y=y, edge_index=edge_index, train_mask=train_mask, test_mask=test_mask).to(device)

        assert len(x)==len(y), "Train Input and Output tensor do not have the same dimensions"

        models = utils.get_models(data.num_nodes, input_dim, output_dim, args.n_hidden_units, args.n_hidden_layers, device=device, lr=0.005)
        histories = utils.train(data, models, epochs=args.train_epochs)
        models_histories = utils.update_histories(models_histories, histories)

        current_metrics = utils.test(data, models)
        utils.update_metrics_dict(models_metrics, current_metrics)

        print('\n')
        
    # Get mean traces
    models_histories = {model: list(history / K)
                        for model, history in models_histories.items()}
    models_metrics = utils.calculate_statistics(models_metrics)

    if args.write_output:
        utils.write_json("../data/results/models_metrics_{}e_{}l_{}u.json".format(args.train_epochs, args.n_hidden_layers, args.n_hidden_units), models_metrics)
        utils.write_json("../data/results/models_histories_{}e_{}l_{}u.json".format(args.train_epochs, args.n_hidden_layers, args.n_hidden_units), models_histories)
Example #26
def train_model(
    model_cls,
    dataset,
    y,
    edges,
    train_mask,
    val_mask,
    test_mask,
    epochs,
    device,
    seed=0,
    end=" ",
):
    torch.manual_seed(seed)
    data = dataset[0]
    model = model_cls(dataset.num_features, dataset.num_classes).to(device)
    data = data.to(device)
    edges = edges.to(device)
    y = y.to(device)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=0.01,
                                 weight_decay=5e-4)
    best_val_acc = test_acc = 0
    for epoch in range(epochs):
        enlarge_train(model, optimizer, data, y, edges, train_mask)
        train_acc, val_acc, tmp_test_acc = test(model, data, edges, train_mask,
                                                val_mask, test_mask)
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            test_acc = tmp_test_acc
    print(epochs, "seed:", seed, train_acc, val_acc, test_acc, end=end)
    return model
Example #27
def train():

    device = 'cuda' if torch.cuda.is_available() and is_gpu else 'cpu'
    print("Using Device -", device)

    print("Start Training Process...\n")

    train_dataset = utils.tranform_train_dataset(train_dir)
    trainloader = utils.DataLoader(train_dataset, 64)

    valid_dataset = utils.transform_valid_test_dataset(valid_dir)
    validloader = utils.DataLoader(valid_dataset, 64)

    test_dataset = utils.transform_valid_test_dataset(test_dir)
    testloader = utils.DataLoader(test_dataset, 64)

    fc_model = OrderedDict([
        ('fc1', nn.Linear(25088, hidden_layer_1)),
        ('relu', nn.ReLU()),
        ('dropout1', nn.Dropout(dropout)),
        ('fc2', nn.Linear(hidden_layer_1, hidden_layer_1)),
        ('relu', nn.ReLU()),
        #('fc3',nn.Linear(256,256)),
        #('relu',nn.ReLU()),
        ('fc4', nn.Linear(hidden_layer_1, 102)),
        ('output', nn.LogSoftmax(dim=1))
    ])

    model = utils.build_network(fc_model, network, dropout, device)
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)

    model = utils.train(model, trainloader, epochs, learning_rate, criterion,
                        optimizer, device, validloader)
    model.class_to_idx = train_dataset.class_to_idx
    utils.save_checkpoint(model, optimizer, epochs, save_dir, network,
                          hidden_layer_1, dropout, learning_rate)

    print("End Training Process...\n")

    print("Start Test Process...\n")

    utils.test(model, testloader, criterion, device)

    print("End Test Process...\n")
Example #28
def testit(sequenceLabeler):
    passed = 0
    for sample in samples:
        pred = postprocess(test(sequenceLabeler, sample[0]))
        if pred == sample[1]:
            passed += 1

    print "\n======== ACCURACY:[", (passed*1.0/len(samples))*100, "% ] ======"
    print "====================================\n"
Example #29
    def evaluate(self):
        val_acc, val_loss = test(test_loader=self.test_loader,
                                 model=self.model,
                                 criterion=self.criterion,
                                 device=self.device)
        print('Accuracy: {} Validate Loss: {}'.format(val_acc, val_loss))

        self.history['val_acc'].append(val_acc)
        self.history['val_loss'].append(val_loss)
Example #30
def l0_process(epochs, importance, use_cuda=True, weight=None):
    model = L0LeNet5(10)
    if torch.cuda.is_available() and use_cuda:
        model.cuda()
    optimizer = optim.SGD(params=model.parameters(), lr=lr)

    loss, acc, ewc = {}, {}, {}
    for task in range(num_task):
        loss[task] = []
        acc[task] = []

        if task == 0:
            if weight:
                model.load_state_dict(weight)
            else:
                for epoch in tqdm(range(epochs)):
                    print("Epoch {}".format(epoch))
                    loss[task].append(
                        train_l0(model, optimizer, train_loader[task]))
                    task_test_acc = test(model, test_loader[task])
                    print('Test acc for task {}, epoch {}, acc {}'.format(
                        task, epoch, task_test_acc))
                    acc[task].append(task_test_acc)
        else:
            old_tasks = []
            for sub_task in range(task):
                old_tasks = old_tasks + train_loader[
                    sub_task].dataset.get_sample(sample_size)
            old_tasks = random.sample(old_tasks, k=sample_size)
            train_l0(model, optimizer, old_tasks)
            l0_scores, model_params = get_l0_scores_and_model_params(model)
            for epoch in tqdm(range(epochs)):
                loss[task].append(
                    l0_train_trans(model, optimizer, train_loader[task],
                                   l0_scores, model_params, importance))
                for sub_task in range(task + 1):
                    task_test_acc = test(model, test_loader[sub_task])
                    print('Test acc for task {}, epoch {}, acc {}'.format(
                        task, epoch, task_test_acc))
                    acc[sub_task].append(task_test_acc)

    print(loss)
    print(acc)
    return loss, acc
Example #31
def cross_validate(n_epochs, _min, _max, groups):
    """
    :param n_epochs: number of epochs.
    :param _min: min value for normalization.
    :param _max: max value for normalization.
    :param groups: groups: array of equal sized numpy matrices.
    :return: run groups.shape[0] times, and each time takes a different group
    to be the validation set, and all the rest are the training set. trains new
    perceptron alg' and print its accuracy. returns the average accuracy.
    """

    k = groups.shape[0]
    _sum = 0

    for i in range(k):

        train_set = None
        valid_set = np.copy(
            groups[i])  # the validation set for the i'th iteration.

        # p = perceptron()  # a new (!) perceptron object.
        svm = SVM(eta=0.999, c=0.01)
        # pa = PA()

        for j in range(k):

            if j != i:
                # arrange the training set for the i'th iteration.
                if train_set is None:
                    train_set = np.copy(groups[j])
                else:
                    train_set = np.concatenate((train_set, groups[j]), axis=0)

        mins, denoms = norm.minmax_params(train_set)
        train_set = norm.minmax(train_set, _min, _max)
        valid_set = norm.minmax(valid_set, _min, _max, mins, denoms)

        # training the model with the i'th training set.
        # utils.train(p, train_set, n_epochs)
        utils.train(svm, train_set, n_epochs)
        # utils.train(pa, train_set, n_epochs)

        # printing the result of the i'th test on the validation set,
        # and saving the sum.
        print("iteration number " + str(i + 1) + " : ", end='')
        # _sum += utils.test(p, valid_set)
        _sum += utils.test(svm, valid_set)
        # _sum += utils.test(pa, valid_set)

    accuracy = float(_sum) / k

    # prints the average accuracy of the cross validation.
    print("the average accuracy of this session: " + str(accuracy) + " %\n")
    print("---------\n")

    return accuracy
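A hedged usage sketch for cross_validate, assuming the training data is a single numpy matrix (labels included) whose row count divides evenly into k groups; the file name and hyperparameters are illustrative:

import numpy as np

# Assumed driver, not from the original project.
data = np.loadtxt('train_set.csv', delimiter=',')  # assumed file layout
np.random.shuffle(data)
groups = np.array(np.array_split(data, 5))  # requires len(data) % 5 == 0
avg_acc = cross_validate(n_epochs=20, _min=0, _max=1, groups=groups)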
Example #32
def model_load_test(test_df,
                    target_dir,
                    test_prediction_dir,
                    test_prediction_name,
                    max_seq_len=50,
                    batch_size=32):
    """
    Parameters
    ----------
    test_df : pandas dataframe of test set.
    target_dir : the path of pretrained model.
    test_prediction_dir : the path that you want to save the prediction result to.
    test_prediction_name : the file name of the prediction result.
    max_seq_len: the max truncated length.
    batch_size : the batch size; defaults to 32.
    """
    bertmodel = BertModel(requires_grad=False)
    tokenizer = bertmodel.tokenizer
    device = torch.device("cuda")

    print(20 * "=", " Preparing for testing ", 20 * "=")
    if platform == "linux" or platform == "linux2":
        checkpoint = torch.load(os.path.join(target_dir, "best.pth.tar"))
    else:
        checkpoint = torch.load(os.path.join(target_dir, "best.pth.tar"),
                                map_location=device)

    print("\t* Loading test data...")
    test_data = DataPrecessForSentence(tokenizer,
                                       test_df,
                                       max_seq_len=max_seq_len)
    test_loader = DataLoader(test_data, shuffle=False, batch_size=batch_size)

    # Retrieving model parameters from checkpoint.
    print("\t* Building model...")
    model = bertmodel.to(device)
    model.load_state_dict(checkpoint["model"])
    print(20 * "=", " Testing BERT model on device: {} ".format(device),
          20 * "=")

    batch_time, total_time, accuracy, all_prob = test(model, test_loader)
    print(
        "\n-> Average batch processing time: {:.4f}s, total test time: {:.4f}s, accuracy: {:.4f}%\n"
        .format(batch_time, total_time, (accuracy * 100)))

    test_prediction = pd.DataFrame({'prob_1': all_prob})
    test_prediction['prob_0'] = 1 - test_prediction['prob_1']
    test_prediction['prediction'] = test_prediction.apply(
        lambda x: 0 if (x['prob_0'] > x['prob_1']) else 1, axis=1)
    test_prediction = test_prediction[['prob_0', 'prob_1', 'prediction']]
    if not os.path.exists(test_prediction_dir):
        os.makedirs(test_prediction_dir)
    test_prediction.to_csv(os.path.join(test_prediction_dir,
                                        test_prediction_name),
                           index=False)
Example #33
def testAPI():
    global vw, sequenceLabeler
    model = request.args.get("model", "tagger1.bin")
    try:
        query = request.args.get("query")
        query = query.strip().lower()
        chunked = test(sequenceLabeler, query)
        return json.dumps(chunked)
    except:
        return "'query' field not present OR error in prediction"
Example #34
def main(fn, output_fn):
    print("Reading in dataset")
    data, classes = readDataset(fn)
    print(len(data), " sequences found")
    print("Found classes:", sorted(classes))
    proc = Processor(classes, 2, 2, prefix=(1,3), affix=(2,1), hashes=2,
            features=100000, stem=False, ohe=False)

    yss = []
    ryss = []
    for Xs in data:
        ys = [x['output'] for x in Xs]
        yss.append(ys)
        ryss.append([proc.encode_target(ys, i) for i in range(len(ys))])

    rs = np.random.RandomState(seed=2016)
    print("Starting KFolding")
    y_trues, y_preds = [], []
    fold_object = KFold(5, shuffle=True, random_state=1)  # random_state requires shuffle=True
    for train_idx, test_idx in fold_object.split(data):
        tr_X, tr_y = subset(data, yss, train_idx, rs)
        test_data = subset(data, yss, test_idx, rs, False)

        print("Training")
        d = Dagger(proc, tr_X, tr_y, validation_set=test_data)
        clf = d.train(10)

        seq = Sequencer(proc, clf)

        print("Testing")
        y_true, y_pred = test(data, ryss, test_idx, seq)
#        print(y_true, y_pred, proc.labels)
        print( classification_report(y_true, y_pred))

        y_trues.extend(y_true)
        y_preds.extend(y_pred)

    print("Total Report")
    print(classification_report(y_trues, y_preds, target_names=proc.labels))

    print("Training all")
    idxs = range(len(data))
    tr_X, tr_y = subset(data, yss, idxs, rs)
    d = Dagger(proc, tr_X, tr_y)
    clf = d.train()
    seq = Sequencer(proc, clf)

    save(output_fn, seq)
Example #35
File: test.py Project: ckmetto/iw
 def test1_randomstops(self):
     utils.test(lambda: gen.random_stops(N=100))
Example #36
def runTest():
  print 'match_ends'
  utils.test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
  utils.test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
  utils.test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)

  print
  print 'front_x'
  utils.test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
       ['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
  utils.test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
       ['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
  utils.test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
       ['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])

  print
  print 'sort_last'
  utils.test(sort_last([(1, 3), (3, 2), (2, 1)]),
  [(2, 1), (3, 2), (1, 3)])
  utils.test(sort_last([(2, 3), (1, 2), (3, 1)]),
  [(3, 1), (1, 2), (2, 3)])
  utils.test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
  [(2, 2), (1, 3), (3, 4, 5), (1, 7)])

  print
  print 'remove_adjacent'
  utils.test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
  utils.test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
  utils.test(remove_adjacent([]), [])

  print
  print 'linear_merge'
  utils.test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
       ['aa', 'bb', 'cc', 'xx', 'zz'])
  utils.test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
       ['aa', 'bb', 'cc', 'xx', 'zz'])
  utils.test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
       ['aa', 'aa', 'aa', 'bb', 'bb'])
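The utils.test(got, expected) calls above follow the Google Python exercises convention; a minimal sketch of a compatible helper (an assumption; the real utils.test may differ):

# Hedged sketch: report whether the computed value matches the expected one.
def test(got, expected):
    prefix = ' OK ' if got == expected else '  X '
    print('%s got: %s expected: %s' % (prefix, repr(got), repr(expected)))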
Example #37
# Get digit wireframe of hand and present to user
def main(frame):
    # Get the guitar model
    guitar = guitarmodel.get_guitar(frame)

    # Return unchanged frame if no guitar
    if not guitar:
        return frame
    
    # Get the hands (list of fingertips) in the frame
    (pickhand, frethand) = get_hands(frame, guitar)
    
    # Print for debugging and testing purposes
    sys.stderr.write( errorstring.format(list(pickhand), list(frethand)) ) 
    
    # Add overlay circles on fingers for display
    frame = utils.addrectangle(frame, pickhand_coords)
    frame = utils.addtext(frame, "Pickhand has found {} fingertips".format(sum([1 for _ in pickhand])), location="ur")
    frame = reduce(utils.addcircle, [frame] + list(pickhand))
    frame = utils.addrectangle(frame, frethand_coords)
    frame = utils.addtext(frame, "Frethand has found {} fingertips".format(sum([1 for _ in frethand])), location="ul")
    frame = reduce(utils.addcircle, [frame] + list(frethand))
    return frame

if __name__ == '__main__':
    if len(sys.argv) == 2:
        utils.test(sys.argv[1], main)
    else:
        utils.capture(main)
Example #38
#!/usr/bin/env python

import utils

utils.test(utils.hamming_distance(utils.str_to_bin('this is a test'),
                                  utils.str_to_bin('wokka wokka!!!')), 37)

with open('files/6.txt') as f:
    text = f.read().decode('base64')

bin_text = utils.str_to_bin(text)
bin_text_arr = bin_text.split()
min_distance = 0
sizes = []

keysizes = []
for keysize in range(2, 41):
    # first KEYSIZE worth of bytes, and the second KEYSIZE worth of bytes
    info = [bin_text_arr[i:i+keysize]
            for i in range(0, len(bin_text_arr), keysize)]

    total_normalized_distance = 0

    # average the hamming distance between sets of bytes
    for a, b in zip(info[::2], info[1::2]):
        edit_distance = utils.hamming_distance(''.join(a), ''.join(b))
        # Normalize this result by dividing by KEYSIZE.
        total_normalized_distance += float(edit_distance) / float(keysize)

    avg_distance = float(total_normalized_distance) / float(len(info))
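The loop above scores each keysize but never records the result (min_distance, sizes, and keysizes stay unused); a hedged continuation, not in the original, that keeps the best-scoring keysize:

    # Assumed continuation: remember each keysize's score inside the loop...
    keysizes.append((avg_distance, keysize))

# ...then pick the keysize with the smallest average normalized distance.
likely_keysize = min(keysizes)[1]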
Example #39
def runTest():

    print
    print 'sort_dict_by_value'
    utils.test(sort_dict_by_value({'Japan':23, 'Germany':4, 'Brazil':2, 'China':89}),
                                  {'Brazil':2, 'Germany':4, 'Japan':23, 'China':89})
Example #40
#!/usr/bin/env python

from utils import test, sxor

test('746865206b696420646f6e277420706c6179',
     sxor('1c0111001f010100061a024b53535009181c'.decode('hex'),
          '686974207468652062756c6c277320657965'.decode('hex')).encode('hex'))
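sxor is presumably a fixed-length string XOR helper; a hedged Python 2 sketch consistent with the call above (an assumption; the real utils.sxor may differ):

# Assumed implementation: XOR two equal-length strings byte by byte
# (Python 2 string semantics, matching the .decode('hex') calls above).
def sxor(s1, s2):
    return ''.join(chr(ord(a) ^ ord(b)) for a, b in zip(s1, s2))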
Example #41
def runTest():
  print 'verbing'
  utils.test(verbing('hail'), 'hailing')
  utils.test(verbing('swiming'), 'swimingly')
  utils.test(verbing('do'), 'do')

  print
  print 'not_bad'
  utils.test(not_bad('This movie is not so bad'), 'This movie is good')
  utils.test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
  utils.test(not_bad('This tea is not hot'), 'This tea is not hot')
  utils.test(not_bad("It's bad yet not"), "It's bad yet not")

  print
  print 'front_back'
  utils.test(front_back('abcd', 'xy'), 'abxcdy')
  utils.test(front_back('abcde', 'xyz'), 'abcxydez')
  utils.test(front_back('Kitten', 'Donut'), 'KitDontenut')
Example #42
import utils
utils.make()
utils.test()
utils.print_all()
Example #43
File: p44.py Project: icot/euler
#!/usr/bin/python

from utils import test_pent as test

if __name__ == "__main__":
    limit = 10000
    print "Generating pentagonal numbers"
    a = [ x*(3*x -1)/2 for x in xrange(1,limit) ]
    b = [ x*(3*x -1)/2 for x in xrange(1,limit) ]
    print "Generating pairs"
    # Variant 1: brute-force pair search over the precomputed lists.
    pairs = [(a[i], b[j], abs(a[i] - b[j]))
             for i in xrange(1, len(a)) for j in xrange(1, len(b))
             if test(abs(a[i] - b[j])) and test(a[i] + b[j])]
    for pair in pairs:
        print pair

    # Variant 2: incremental search tracking the minimum difference.
    # Assumes `p` is a set of the pentagonal numbers generated above.
    pairs = []
    minr = 1e6
    for i in xrange(1, limit):
        p1 = i*(3*i - 1)/2
        for k in xrange(i+1, limit):
            p2 = k*(3*k - 1)/2
            sik = p1 + p2
            rik = abs(p1 - p2)
            if rik < minr:
                if rik in p and sik in p:
                    pairs.append((p1, p2, rik))
                    minr = rik
            print i, k, p1, p2, sik, rik, minr
    print pairs
Example #44
def parse_ath_message(mycode, mystr):
		if mycode.startswith(".bf ", 0, 4) == True:
			mycode = mycode.replace(".bf ", "")
			irc.send_msg(bfc.compile_bf(mycode), variables.channel)
			signal.alarm(0)
		elif mycode.startswith(".be ", 0, 4) == True:
			mycode = mycode.replace(".be ", "")
			irc.send_msg(bec.compile_be(mycode), variables.channel)
			signal.alarm(0)
		elif mycode.startswith(".ul ", 0, 4) == True:
			print mycode
			mycode = mycode.replace(".ul ", "")
			irc.send_msg(ulc.compile_ul(mycode), variables.channel)
			signal.alarm(0)
		elif mycode.startswith(".test", 0, 5) == True:
			utils.test()
		elif mycode.startswith(".eval", 0, 5) == True:
			output = "Result: " + utils.evaluate_expression(mycode)
			irc.send_msg(output, variables.channel)
		elif mycode.startswith(".d2h", 0, 4) == True:
			utils.decimal_to_hex(mycode)
		elif mycode.startswith(".h2d", 0, 4) == True:
			utils.hex_to_decimal(mycode)
		elif mycode.startswith(".d2b", 0, 4) == True:
			utils.decimal_to_bin(mycode)
		elif mycode.startswith(".h2b", 0, 4) == True:
			utils.hex_to_bin(mycode)
		elif mycode.startswith(".b2h", 0, 4) == True:
			utils.bin_to_hex(mycode)
		elif mycode.startswith(".b2d", 0, 4) == True:
			utils.bin_to_decimal(mycode)
		elif mycode.startswith(".join", 0, 5) == True:
			user = mystr['PREFIX']
			user = user.split('!')
			if user[0] != variables.head_user:
				irc.send_msg("You need to be: " + variables.head_user + " to make me join a channel", variables.channel)	
			else:
				mychan = mycode.replace(".join", "")
				mychan = mychan.replace(" ", "")
				if mychan == "0":
					irc.send_msg(variables.i_hate_you, variables.channel)
				else:
					irc.join_channel(mychan)

		elif mycode.startswith(".leave", 0, 6):
			user = mystr['PREFIX']
			user = user.split('!')
			if user[0] != variables.head_user:
				irc.send_msg("You need to be: " + variables.head_user + " to make me leave a channel", variables.channel)	
			else:
				mychan = mycode.replace(".leave", "")
				mychan = mychan.replace(" ", "")
				if mychan == "0":
					irc.send_msg(variables.i_hate_you, variables.channel)
				else:
					irc.leave_channel(mychan)
		elif mycode.startswith(".time ", 0, 6) == True:
			utils.get_time(mycode)
		elif mycode.startswith(".ccount ", 0, 8) == True:
			mycode = mycode.replace(".ccount ", "")
			length = 0
			if check_if_unicode(mycode) == True:
				length = unicode_len(mycode)
			else:
				length = len(mycode)
			irc.send_msg("Length: " + str(length), variables.channel)
		elif mycode.startswith(".help", 0, 5) == True:
			irc.send_msg(help.athena_help, variables.channel)
		elif mycode.startswith(".list", 0, 5):
			irc.send_msg("List of modules: " + str(list(imports())), variables.channel)	
		elif mycode.startswith(".xkcd ", 0, 6):
			irc.send_msg(u'\u200b' + xkcd.search_xkcd(mycode.replace(".xkcd ", "")), variables.channel)
		elif mycode.startswith(".tr ", 0, 4):
			mycode = mycode.replace(".tr ", "")
			lang = mycode[:2]
			mycode = mycode.replace(lang, "")
			irc.send_msg(utils.translate_lang(lang, mycode), variables.channel)
		elif mycode.startswith(".bftxt ", 0, 7):
			mycode = mycode.replace(".bftxt ", "")
			irc.send_msg("Output: " + bftxt.bf(mycode), variables.channel)
		elif mycode.startswith(".df ", 0, 4):
			mycode = mycode.replace(".df ", "")
			irc.send_msg("Output: " + deadfish.compile_df(mycode), variables.channel)
		elif mycode.startswith(".source", 0, 7):
			irc.send_msg(u"\u200bhttps://github.com/Benderx2/athena_bot", variables.channel)
Example #45
#!/usr/bin/env python2

from utils import rep_key_xor, test

phrase = 'Burning \'em, if you ain\'t quick and nimble\n' \
         'I go crazy when I hear a cymbal'

test(rep_key_xor(phrase).encode('hex'),
     '0b3637272a2b2e63622c2e69692a23693a2a3c6'
     '324202d623d63343c2a26226324272765272a28'
     '2b2f20430a652e2c652a3124333a653e2b20276'
     '30c692b20283165286326302e27282f')