Example 1
                        type=str,
                        default='./logfile',
                        metavar='LOG_PATH',
                        help='logfile path, tensorboard format')
    parser.add_argument('--savedir',
                        type=str,
                        default='./models',
                        metavar='SAVE_PATH',
                        help='saving path, pickle format')
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()
    np.random.seed(args.seed)
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    # This is how you would normally build the dataset:
    #dataset = CoraDataset(feature_file = './data/cora.features',
    #     edge_file = './data/cora_edgelist', label_file = './data/cora_label')
    #dataset.read_embbedings('./embedding/embedding_line_cora')
    #dataset.setting(20, 1000)

    # but here we load the prebuilt Cora example instead
    with open('cora.dataset', 'rb') as fdata:
        dataset = pkl.load(fdata, encoding='iso-8859-1')
    gan = GraphSGAN(Generator(200, dataset.k + dataset.d),
                    Discriminator(dataset.k + dataset.d, dataset.m), dataset,
                    args)
    gan.train()
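
Every example here is truncated above its first visible add_argument call, so the parser object and the --cuda / --seed flags that the later lines read back (args.cuda, args.seed) are assumed to exist. A minimal, hypothetical preamble consistent with those accesses could look like the sketch below; the flag defaults and the parser description are guesses, not the original code.

import argparse
import pickle as pkl
import random

import numpy as np
import torch
# plus the project-specific imports: Generator, Discriminator, GraphSGAN, CoraDataset

parser = argparse.ArgumentParser(description='semi-supervised GAN training')
parser.add_argument('--cuda', action='store_true', default=False,
                    help='enable CUDA training when a GPU is available')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
# ... the logging / dataset flags shown in the examples follow here ...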
Example 2
                        type=float,
                        default=1,
                        metavar='N',
                        help='scale factor between labeled and unlabeled data')
    parser.add_argument('--logdir',
                        type=str,
                        default='./logfile',
                        metavar='LOG_PATH',
                        help='logfile path, tensorboard format')
    parser.add_argument('--savedir',
                        type=str,
                        default='./models',
                        metavar='SAVE_PATH',
                        help='saving path, pickle format')
    parser.add_argument('--d_repeat',
                        type=int,
                        default=5,
                        metavar='DR',
                        help='training D repeat times (default: 5)')
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()
    np.random.seed(args.seed)

    preprocessor = PreProcessor()
    gan = ImprovedGAN(Generator(100), Discriminator(),
                      preprocessor.labeled_dataset(),
                      preprocessor.unlabeled_dataset(),
                      preprocessor.test_dataset(), args)
    gan.train()
    gan.eval()
Example 3
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--eval-interval', type=int, default=1, metavar='N',
                        help='how many batches to wait before evaling training status')
    parser.add_argument('--unlabel-weight', type=float, default=0.5, metavar='N',
                        help='scale factor between labeled and unlabeled data')
    parser.add_argument('--logdir', type=str, default='./logfile', metavar='LOG_PATH', help='logfile path, tensorboard format')
    parser.add_argument('--savedir', type=str, default='./models', metavar='SAVE_PATH', help='saving path, pickle format')
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()
    np.random.seed(args.seed)
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    # This is how you would normally build the dataset:
    #dataset = CoraDataset(feature_file = './data/cora.features',
    #     edge_file = './data/cora_edgelist', label_file = './data/cora_label')
    #dataset.read_embbedings('./embedding/embedding_line_cora')
    #dataset.setting(20, 1000)

    # but here we load the prebuilt Cora example instead
    with open('cora.dataset', 'rb') as fdata:
        dataset = pkl.load(fdata, encoding='latin1')
    gan = GraphSGAN(Generator(200, dataset.k + dataset.d), Discriminator(dataset.k + dataset.d, dataset.m), dataset, args)
    gan.train()
Example 4
        '--eval-interval',
        type=int,
        default=1,
        metavar='N',
        help='how many batches to wait before evaling training status')
    parser.add_argument('--unlabel-weight',
                        type=float,
                        default=1,
                        metavar='N',
                        help='scale factor between labeled and unlabeled data')
    parser.add_argument('--logdir',
                        type=str,
                        default='./logfile',
                        metavar='LOG_PATH',
                        help='logfile path, tensorboard format')
    parser.add_argument('--savedir',
                        type=str,
                        default='./models',
                        metavar='SAVE_PATH',
                        help='saving path, pickle format')

    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()

    np.random.seed(args.seed)
    #gan = ImprovedGAN(Generator(100), Discriminator(), MnistLabel(10), MnistUnlabel(), MnistTest(), args)
    path = "/scratch/ks4883/dl_data/"
    gan = ImprovedGAN(Generator(64), Discriminator(), DL_Label(path, 60),
                      DL_Unlabel(path), DL_Test(path), args)
    gan.train()
Example 5
                        default='./logfile',
                        metavar='LOG_PATH',
                        help='logfile path, tensorboard format')
    parser.add_argument('--savedir',
                        type=str,
                        default='./models',
                        metavar='SAVE_PATH',
                        help='saving path, pickle format')
    args = parser.parse_args()

    args.cuda = args.cuda and torch.cuda.is_available()
    np.random.seed(args.seed)

    device = torch.device("cuda:0" if args.cuda else "cpu")
    cudnn.benchmark = True

    gan = ImprovedGAN(Generator(100), Discriminator(), MnistLabel(10),
                      MnistUnlabel(), MnistVal(), args)

    # gan = ImprovedGAN(Generator(100, output_dim = 64 * 64 * 3),
    #                   Discriminator(input_dim = 64 * 64 * 3, output_dim = 1000),
    #                   ImageNetLabel(1000, 2), ImageNetUnlabel(), ImageNetVal(),
    #                   args)

    # gan = ImprovedGAN(Generator(z_dim=100, nc=3).to(device),
    #                   Discriminator(nc = 3, output_units = 1000).to(device),
    #                   ImageNetLabel(1000, 2), ImageNetUnlabel(), ImageNetVal(),
    #                   args)

    gan.train()
Example 6
        default=1,
        metavar='N',
        help='how many batches to wait before evaling training status')
    parser.add_argument('--unlabel-weight',
                        type=float,
                        default=1,
                        metavar='N',
                        help='scale factor between labeled and unlabeled data')
    parser.add_argument('--logdir',
                        type=str,
                        default='./logfile',
                        metavar='LOG_PATH',
                        help='logfile path, tensorboard format')
    parser.add_argument('--savedir',
                        type=str,
                        default='./models',
                        metavar='SAVE_PATH',
                        help='saving path, pickle format')
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()
    if args.cuda:
        print("Training with GPU")

    np.random.seed(args.seed)
    # gan = ImprovedGAN(Generator(100), Discriminator(), MnistLabel(10), MnistUnlabel(), MnistTest(), args)
    gan = ImprovedGAN(Generator(z_dim=1), Discriminator(), args)
    gan.train()
    # gan.test = gan.data.load_train_data_sup()
    # print(gan.eval() / gan.test.dataset.__len__())
    # gan.test = gan.data.load_val_data()
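    # Assuming gan.eval() returns the number of correctly classified test
    # examples, the line below prints test accuracy as a fraction.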
    print(gan.eval() / gan.test.dataset.__len__())
Example 7
        default=1,
        metavar="N",
        help="scale factor between labeled and unlabeled data",
    )
    parser.add_argument(
        "--logdir",
        type=str,
        default="./logfile",
        metavar="LOG_PATH",
        help="logfile path, tensorboard format",
    )
    parser.add_argument(
        "--savedir",
        type=str,
        default="./models",
        metavar="SAVE_PATH",
        help="saving path, pickle format",
    )
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()
    np.random.seed(args.seed)
    gan = ImprovedGAN(
        Generator(100),
        Discriminator(),
        MnistLabel(10),
        MnistUnlabel(),
        MnistTest(),
        args,
    )
    gan.train()
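
Note that Examples 2 and 4 through 7 seed only NumPy, while Examples 1 and 3 also seed Python's random module and torch. If a fully reproducible run is wanted for the MNIST variant above, the same seeding block used in Examples 1 and 3 can be added right after np.random.seed(args.seed):

    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)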