Example #1
import copy

import torch

from args import process_args

# `device` is used below but never defined in this excerpt; a standard
# CUDA-if-available setup is assumed. The helpers (load_dataset, select_loss,
# select_model, nnPUloss, nnPUSBloss, make_optimizer, trainer) are assumed to
# come from the project's own modules, which are not shown here.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def main():
    args = process_args()
    print("using:", device)

    # dataset setup
    XYtrainLoader, XYvalidLoader, XYtestLoader, prior, dim = load_dataset(
        args.dataset,
        args.labeled,
        args.unlabeled,
        args.batchsize,
        with_bias=True,
        resample_model=args.model)

    # model setup
    loss_type = select_loss(args.loss)
    selected_model = select_model(args.model)
    model = selected_model(dim)
    models = {
        "nnPU": copy.deepcopy(model).to(device),
        "nnPUSB": copy.deepcopy(model).to(device)
    }
    loss_funcs = {
        "nnPU": nnPUloss(prior,
                         loss=loss_type,
                         gamma=args.gamma,
                         beta=args.beta),
        "nnPUSB": nnPUSBloss(prior, gamma=args.gamma, beta=args.beta)
    }

    # trainer setup
    optimizers = {
        k: make_optimizer(v, args.stepsize)
        for k, v in models.items()
    }
    print("input dim: {}".format(dim))
    print("prior: {}".format(prior))
    print("loss: {}".format(args.loss))
    print("batchsize: {}".format(args.batchsize))
    print("model: {}".format(selected_model))
    print("beta: {}".format(args.beta))
    print("gamma: {}".format(args.gamma))
    print("")

    # run training
    PUtrainer = trainer(models, loss_funcs, optimizers, XYtrainLoader,
                        XYvalidLoader, XYtestLoader, prior)
    PUtrainer.run(args.epoch)
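
Every example on this page pulls its configuration from args.process_args(). For reference, here is a minimal argparse-based sketch of what that helper might look like for this training script; the flag names mirror the attributes the script reads, while the defaults are placeholders, not the project's actual values.

import argparse

def process_args():
    # Hypothetical sketch of args.process_args(); only the flag names are
    # taken from the example above, defaults are illustrative only.
    parser = argparse.ArgumentParser(description="nnPU / nnPUSB training")
    parser.add_argument("--dataset", default="mnist")
    parser.add_argument("--labeled", type=int, default=100)
    parser.add_argument("--unlabeled", type=int, default=59900)
    parser.add_argument("--batchsize", type=int, default=30000)
    parser.add_argument("--model", default="mlp")
    parser.add_argument("--loss", default="sigmoid")
    parser.add_argument("--gamma", type=float, default=1.0)
    parser.add_argument("--beta", type=float, default=0.0)
    parser.add_argument("--stepsize", type=float, default=1e-3)
    parser.add_argument("--epoch", type=int, default=100)
    return parser.parse_args()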
Example #2
        layer5 = self._compute_cond_module(self.res5, layer4)

        ref1 = self.refine1([layer5], layer5.shape[2:])
        ref2 = self.refine2([layer4, ref1], layer4.shape[2:])
        ref31 = self.refine31([layer31, ref2], layer31.shape[2:])
        ref3 = self.refine3([layer3, ref31], layer3.shape[2:])
        ref4 = self.refine4([layer2, ref3], layer2.shape[2:])
        output = self.refine5([layer1, ref4], layer1.shape[2:])

        output = self.normalizer(output)
        output = self.act(output)
        output = self.end_conv(output)

        return output


if __name__ == '__main__':
    from args import process_args
    args = process_args()

    if args.model_choice == 'ncsnv2':
        model = NCSNv2(norm_type=args.norm_type, act_type=args.act)
    elif args.model_choice == 'ncsnv2_deeper':
        model = NCSNv2Deeper(norm_type=args.norm_type, act_type=args.act)
    elif args.model_choice == 'ncsnv2_deepest':
        model = NCSNv2Deepest(norm_type=args.norm_type, act_type=args.act)
    else:
        raise ValueError("unknown model_choice: %s" % args.model_choice)

    print(model)

    # smoke test: run one random batch through the model and confirm the
    # output shape matches the input shape
    x = torch.rand(64, 3, 32, 32)
    print(x.shape, model(x).shape)
Example #3
            finetune_generator_complete_ckpt = os.path.join(
                args.exp_dir, "finetune_%s_generator_complete.pth" % task.name
            )

            save_model(finetune_generator_complete_ckpt, sup_model["generator"])

            finetune_discriminator_complete_ckpt = os.path.join(
                args.exp_dir, "finetune_%s_discriminator_complete.pth" % task.name
            )

            save_model(finetune_discriminator_complete_ckpt, sup_model["discriminator"])
        
        else:
            finetune_complete_ckpt = os.path.join(
                args.exp_dir, "finetune_%s_complete.pth" % task.name
            )

            save_model(finetune_complete_ckpt, sup_model)
        

    # evaluate
    # TODO: evaluate result on test split, write prediction for leaderboard submission (for dataset
    # without test labels)
    log.info("Done")
    return


if __name__ == "__main__":
    args = parser.parse_args()
    process_args(args)
    main(args)
Example #4
from transactions import TransactionsList
from args import process_args
from apriori import Apriori
from dic import Dic
from stats import Stats
from rules import RulesGenerator
from writer import Writer

def main(args):
    stats = Stats()
    transactions = TransactionsList(args.infile)
    if args.algorithm == 'apriori':
        algorithm = Apriori(transactions, args.minsup)
    else:
        algorithm = Dic(transactions, args.minsup, args.m)
    large_sets, counter = algorithm.get_large_sets_and_counter()
    stats.record_post_large_sets()
    rules = RulesGenerator.generate_rules(large_sets, args.minconf, counter, transactions)
    stats.record_post_rules()

    writer = Writer(args.outfile)
    writer.add_args(args)
    writer.add_stats(stats)
    writer.add_rules(rules)
    writer.write()

if __name__ == '__main__':
    args = process_args()
    main(args)
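
Here, too, the command-line interface is hidden behind args.process_args(). A compact sketch of a plausible implementation for this mining tool follows; the positional/optional split and all help strings are assumptions based solely on how main() uses the arguments above.

import argparse

def process_args():
    # Hypothetical sketch; only the attribute names (infile, outfile,
    # algorithm, minsup, minconf, m) are taken from the example above.
    parser = argparse.ArgumentParser(description="association rule mining")
    parser.add_argument("infile", help="transactions input file")
    parser.add_argument("outfile", help="output file for rules and stats")
    parser.add_argument("--algorithm", choices=["apriori", "dic"], default="apriori")
    parser.add_argument("--minsup", type=float, help="minimum support")
    parser.add_argument("--minconf", type=float, help="minimum confidence")
    parser.add_argument("--m", type=int, help="DIC interval size")
    return parser.parse_args()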
Example #5
        shadow_buffers = OrderedDict(self.shadow.named_buffers())

        # check that both models contain the same set of buffer keys
        assert model_buffers.keys() == shadow_buffers.keys()

        for name, buffer in model_buffers.items():
            # buffers are copied over directly rather than averaged
            shadow_buffers[name].copy_(buffer)

    def forward(self, inputs):
        return self.shadow(inputs)


if __name__ == '__main__':
    from args import process_args
    args = process_args()
    import data
    test_data = data.Data(args, 'test')

    # sanity check: Gaussian statistics estimated from random features,
    # compared against a slightly perturbed copy of themselves
    r = torch.normal(mean=10, std=1, size=[64, 2048]).numpy()
    m = np.mean(r, axis=0)
    c = np.cov(r, rowvar=False)
    print(m.shape, c.shape, (m + 1).shape, (c + 1).shape)
    print(calculate_frechet_distance(m, c, m + 1e-1, c + 1e-1))
    loader = torch.utils.data.DataLoader(test_data, 5000)
    for i, batch in enumerate(loader):
        if i > 1:
            break
        # print(torch.unique(batch))
        batch = (batch * 2) - 1  # rescale images from [0, 1] to [-1, 1]
        print(inception_score(batch, False, 8, True, 10))
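
The test block above relies on calculate_frechet_distance, which is defined elsewhere in the module. Assuming it implements the standard Frechet distance between two Gaussians (the quantity behind FID), a minimal sketch would be:

import numpy as np
from scipy import linalg

def calculate_frechet_distance(mu1, sigma1, mu2, sigma2):
    # Frechet distance between N(mu1, sigma1) and N(mu2, sigma2):
    # ||mu1 - mu2||^2 + Tr(sigma1 + sigma2 - 2 * sqrt(sigma1 @ sigma2))
    diff = mu1 - mu2
    # matrix square root of the covariance product; numerical error can
    # introduce a tiny imaginary component, which is discarded
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if np.iscomplexobj(covmean):
        covmean = covmean.real
    return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(covmean)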