Example #1
import argparse
import json

import utils


def parse_args(config_file='configs.json'):
    # Use a context manager so the config file handle is closed promptly
    with open(config_file) as f:
        ALL_CONFIG = json.load(f)

    CONFIG = ALL_CONFIG["data_agnostic_configs"]

    # Set default values for data-agnostic configurations
    DEMO = CONFIG["demo"]
    DATASET = CONFIG["dataset"]
    BASIS = CONFIG["basis"]
    LR = CONFIG["learning_rate"]
    MOM = CONFIG["momentum"]
    WD = CONFIG["weight_decay"]
    NUM_ITER = CONFIG["number_iterations"]
    NUM_RESTARTS = CONFIG["number_restarts"]

    parser = argparse.ArgumentParser()
    parser.add_argument('--DEMO', type=str, default=DEMO,
            help='demo, boolean. Set True to run method over subset of 5 images '
                 '(default). Set False to run over entire dataset.')
    parser.add_argument('--DATASET', type=str, default=DATASET,
            help='dataset, DEFAULT=mnist. SUPPORTED=[mnist, xray]')
    parser.add_argument('--BASIS', nargs='+', type=str, default=BASIS,
            help='basis, DEFAULT=csdip. SUPPORTED=[csdip, dct, wavelet]')
    parser.add_argument('--LR', type=float, default=LR,
            help='learning rate, DEFAULT=' + str(LR))
    parser.add_argument('--MOM', type=float, default=MOM,
            help='RMSProp momentum hyperparameter, DEFAULT=' + str(MOM))
    parser.add_argument('--WD', type=float, default=WD,
            help='l2 weight decay hyperparameter, DEFAULT=' + str(WD))
    parser.add_argument('--NUM_ITER', type=int, default=NUM_ITER,
            help='number of iterative weight updates, DEFAULT=' + str(NUM_ITER))
    parser.add_argument('--NUM_RESTARTS', type=int, default=NUM_RESTARTS,
            help='number of restarts, DEFAULT=' + str(NUM_RESTARTS))
    parser.add_argument('--NUM_MEASUREMENTS', nargs='+', type=int, default=None,
            help='number of measurements, DEFAULT dependent on dataset')

    args = parser.parse_args()

    # set values for data-specific configurations
    SPECIFIC_CONFIG = ALL_CONFIG[args.DATASET]
    args.IMG_SIZE = SPECIFIC_CONFIG["img_size"]
    args.NUM_CHANNELS = SPECIFIC_CONFIG["num_channels"]
    args.Z_DIM = SPECIFIC_CONFIG["z_dim"]  # input seed dimension

    NUM_MEAS_DEFAULT = SPECIFIC_CONFIG["num_measurements"]
    if not args.NUM_MEASUREMENTS:
        args.NUM_MEASUREMENTS = NUM_MEAS_DEFAULT

    print(args)

    utils.check_args(args)  # check to make sure args are correct

    return args
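
parse_args above assumes a configs.json holding one "data_agnostic_configs" block plus one block per dataset. The key names below mirror those read by the code; the values are only illustrative placeholders, not the project's real defaults:

# Illustrative shape of configs.json, written as a Python literal
EXAMPLE_CONFIG = {
    "data_agnostic_configs": {
        "demo": "True",
        "dataset": "mnist",
        "basis": ["csdip"],
        "learning_rate": 1e-3,
        "momentum": 0.9,
        "weight_decay": 1e-4,
        "number_iterations": 300,
        "number_restarts": 5
    },
    "mnist": {                         # one such block per supported dataset
        "img_size": 28,
        "num_channels": 1,
        "z_dim": 128,                  # input seed dimension
        "num_measurements": [25, 100]
    }
}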
Example #2
import utils


def main():

	# Check and get arguments
	args = utils.check_args()
	configFile = args.config
	directory = args.directory
	gz = args.gz

	# If no config file was passed on the command line
	if not configFile:
		utils.printWarning("Didn't find '--config' param. Trying to read 'config.json'...")
		configFile = utils.is_valid_json_file('config.json')

	# Try to read the JSON config file
	config = None
	try:
		config = utils.read_json(configFile)['config']
	except (IOError, ValueError, KeyError, TypeError) as e:
		utils.printWarning("Could not read config file: {}".format(e))
	else:
		utils.printSuccess("DONE")

	# If the config was loaded, run the backup
	if config:
		try:
			utils.make_backup(config, directory, gz)
		except Exception as e:
			utils.printWarning("Backup failed: {}".format(e))
		else:
			utils.printSuccess("DONE")

		utils.printSuccess("Closing connection...")
		utils.printSuccess("DONE")
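
The utils helpers this script leans on (printWarning, printSuccess, is_valid_json_file, read_json, make_backup) are not shown. A minimal sketch of plausible implementations, with make_backup left out since its behavior cannot be inferred here:

import json
import os

def printWarning(msg):
    print("[WARN] " + msg)

def printSuccess(msg):
    print("[ OK ] " + msg)

def is_valid_json_file(path):
    # Return the path if it points to readable JSON, else None
    if not os.path.isfile(path):
        return None
    try:
        with open(path) as f:
            json.load(f)
        return path
    except ValueError:
        return None

def read_json(path):
    with open(path) as f:
        return json.load(f)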
Example #3
def main():
    flag_plot, flag_metrics = check_args()
    if (flag_plot, flag_metrics) == (-1, -1):
        return
    (mileage, price) = get_data()
    if (mileage, price) == (-1, -1):
        return
    # Normalize mileage and price for faster computation
    mileage_norm = [float(mileage[i])/max(mileage) for i in range(len(mileage))]
    price_norm = [float(price[i])/max(price) for i in range(len(price))]
    # Perform gradient descent. Learning rate can be tuned here.
    [theta_0, theta_1] = gradient_descent(mileage_norm, price_norm, l_r=0.1)
    # Denormalize
    theta_0 = theta_0 * max(price)
    theta_1 = theta_1 * (max(price) / max(mileage))
    # Input theta values in the .csv file
    write_thetas(theta_0, theta_1)

    # Display information according to flags
    if flag_metrics == 1:
        display_metrics(theta_0, theta_1, mileage, price)
    if flag_plot == 1:
        # Predicted prices along the fitted line, used for the plot overlay
        predictions = [predict(theta_0, theta_1, m) for m in mileage]
        display_plot(mileage, price, predictions)
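
predict and gradient_descent are assumed by the snippet above. For this single-feature linear model they reduce to a few lines; the sketch below implements batch gradient descent on the MSE cost and is not necessarily the project's exact code:

def predict(theta_0, theta_1, x):
    # Hypothesis of the linear model: price = theta_0 + theta_1 * mileage
    return theta_0 + theta_1 * x

def gradient_descent(x, y, l_r=0.1, n_iter=1000):
    theta_0, theta_1 = 0.0, 0.0
    m = len(x)
    for _ in range(n_iter):
        errors = [predict(theta_0, theta_1, x[i]) - y[i] for i in range(m)]
        # Simultaneous update from the partial derivatives of the MSE cost
        theta_0 -= l_r * sum(errors) / m
        theta_1 -= l_r * sum(errors[i] * x[i] for i in range(m)) / m
    return [theta_0, theta_1]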
Example #4
import argparse
import json

import utils


def parse_args(config_file='configs.json'):
    # Use a context manager so the config file handle is closed promptly
    with open(config_file) as f:
        ALL_CONFIG = json.load(f)

    CONFIG = ALL_CONFIG["data_agnostic_configs"]

    # Set default values for data-agnostic configurations
    DEMO = CONFIG["demo"]
    DATASET = CONFIG["dataset"]
    ALG = CONFIG["alg"]
    NUM_ITER = CONFIG["num_iterations"]
    parser = argparse.ArgumentParser()

    parser.add_argument('--DATASET', type=str, default=DATASET,
            help='dataset, DEFAULT=mnist. SUPPORTED=[mnist, xray, retino]')
    parser.add_argument('--ALG', nargs='+', type=str, default=ALG,
            help='algorithm, DEFAULT=csdip. SUPPORTED=[csdip, dct, wavelet]. BM3D-AMP, TVAL3 must be run in Matlab.')
    parser.add_argument('--NUM_MEASUREMENTS', nargs='+', type=int, default=None,
            help='number of measurements, DEFAULT dependent on dataset.')
    parser.add_argument('--DEMO', type=str, default=DEMO,
            help='demo, boolean. Set True to run method over subset of 5 images '
                 '(default). Set False to run over entire dataset.')
    parser.add_argument('--NUM_ITER', type=int, default=NUM_ITER,
            help='number of iterative weight updates, DEFAULT=' + str(NUM_ITER))
    parser.add_argument('--NUM_RESTARTS', type=int, default=None,
            help='number of restarts, DEFAULT dependent on dataset.')

    args = parser.parse_args()

    # set values for data-specific configurations
    SPECIFIC_CONFIG = ALL_CONFIG[args.DATASET]
    args.IMG_SIZE = SPECIFIC_CONFIG["img_size"]
    args.NUM_CHANNELS = SPECIFIC_CONFIG["num_channels"]
    args.Z_DIM = SPECIFIC_CONFIG["z_dim"]  # input seed dimension
    args.LR_FOLDER = SPECIFIC_CONFIG["lr_folder"]

    # if data-specific arg not set by user
    if not args.NUM_MEASUREMENTS:
        args.NUM_MEASUREMENTS = SPECIFIC_CONFIG["num_measurements"]
    if not args.NUM_RESTARTS:
        args.NUM_RESTARTS = SPECIFIC_CONFIG["num_restarts"]

    utils.check_args(args)  # check to make sure args are correct

    return args
Example #5
import torch


def main():
    parser = build_parser()
    args = parser.parse_args()
    check_args(args)
    check_args_to_run(args)

    device = torch.device('cuda' if args.gpu and torch.cuda.is_available() else 'cpu')

    if args.subcmd == 'citation':
        dataset = load_dataset(args.dataset)

        hparams = {
            'input_dim': dataset.num_node_features,
            'hidden_dim': args.hidden_dim,
            'output_dim': dataset.num_classes,
            'n_layers': args.n_layers,
            'dropout': args.dropout,
            'edge_dropout': args.edge_dropout,
            'layer_wise_dropedge': args.layer_wise_dropedge
        }

        model_name = f'{args.model}-{args.n_layers}-hidden_dim={args.hidden_dim}-dropout={args.dropout}-edge_dropout={args.edge_dropout}-LW={args.layer_wise_dropedge}'
        model_path = f'pretrained_models/{model_name}_{args.dataset.lower()}' + '_{}.pth'

        histories = train_for_citation(args=args,
                                       hparams=hparams,
                                       dataset=dataset,
                                       device=device,
                                       model_path=model_path)

        plot_training(
            histories,
            title=f'{model_name} / {args.dataset.title()}',
            metric_name='accuracy',
            save_path=f'images/{model_name}_{args.dataset.lower()}.png')
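
build_parser is not shown here. Given the attributes the snippet reads (subcmd, gpu, dataset, model, n_layers, hidden_dim, dropout, edge_dropout, layer_wise_dropedge), a plausible sketch using argparse subcommands, with all defaults assumed:

import argparse

def build_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', action='store_true', help='use CUDA when available')
    sub = parser.add_subparsers(dest='subcmd')

    citation = sub.add_parser('citation', help='train on a citation dataset')
    citation.add_argument('--dataset', type=str, default='Cora')
    citation.add_argument('--model', type=str, default='GCN')
    citation.add_argument('--n_layers', type=int, default=2)
    citation.add_argument('--hidden_dim', type=int, default=64)
    citation.add_argument('--dropout', type=float, default=0.5)
    citation.add_argument('--edge_dropout', type=float, default=0.0)
    citation.add_argument('--layer_wise_dropedge', action='store_true')
    return parser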
Example #6
import argparse


def parse_args():
    # Create the ArgumentParser object
    parser = argparse.ArgumentParser(
        description="Tensorflow implementation of GAN Variants")
    # Add the optional arguments
    parser.add_argument('--gan_type',
                        type=str,
                        choices=['GAN', 'CGAN'],
                        help='The type of GAN',
                        required=True)
    parser.add_argument('--dataset',
                        type=str,
                        default='fashion-mnist',
                        help='The name of dataset')
    parser.add_argument('--epoch',
                        type=int,
                        default=20,
                        help='The number of epochs to run')
    parser.add_argument('--batch_size',
                        type=int,
                        default=64,
                        help='The size of batch')
    parser.add_argument('--z_dim',
                        type=int,
                        default=62,
                        help='Dimension of noise vector')
    parser.add_argument('--checkpoint_dir',
                        type=str,
                        default='checkpoint',
                        help='Directory name to save the checkpoints')
    parser.add_argument('--result_dir',
                        type=str,
                        default='results',
                        help='Directory name to save the generated images')
    parser.add_argument('--log_dir',
                        type=str,
                        default='logs',
                        help='Directory name to save training logs')

    return check_args(parser.parse_args())
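
check_args, the helper every example on this page revolves around, is not shown. In scripts like this one it typically creates the output directories and sanity-checks numeric arguments before handing the namespace back; a hedged sketch:

import os

def check_args(args):
    # Make sure the output directories exist
    for d in (args.checkpoint_dir, args.result_dir, args.log_dir):
        os.makedirs(d, exist_ok=True)
    # Basic sanity checks on the numeric arguments
    assert args.epoch >= 1, 'number of epochs must be at least one'
    assert args.batch_size >= 1, 'batch size must be at least one'
    assert args.z_dim >= 1, 'dimension of the noise vector must be at least one'
    return args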
Example #7
    def exec_command(self, command, args):
        """
        the core method of commands exec, it tries to fetch the requested command,
        bind to the right context and call the associated function

        TODO:
        :param command: requested command
        :param args: arguments array
        :return:
        """

        # save the found command and sub_command array
        complete_command_array = [command]
        try:
            if command == '':
                return

            if command in self.commands_map:

                # if the found command has the "ref" property, it is an alias
                # for another command. Ex. short command q --references--> quit
                if 'ref' in self.commands_map[command]:
                    com = self.commands_map[self.commands_map[command]['ref']]
                else:
                    com = self.commands_map[command]

                # if there are no arguments, no sub_command can exist; otherwise take the first one as a candidate
                last_function = False
                if len(args) > 0:
                    possible_sub_command = args[0]
                else:
                    possible_sub_command = None

                # now iterate while we keep matching valid sub_commands;
                # when no further sub_command matches, the last match becomes the command.
                # keep track of each sub_command's parent as we descend
                prev_command = com
                while last_function is False:
                    # if the sub command is a ref, catch the right command
                    if 'ref' in com:
                        com = prev_command['sub_commands'][com['ref']]
                    if 'sub_commands' in com and possible_sub_command:
                        if possible_sub_command in com['sub_commands']:
                            prev_command = com
                            com = com['sub_commands'][possible_sub_command]
                            # pop the found sub_command so we can iterate on the remaining arguments
                            complete_command_array.append(args.pop(0))
                            command = possible_sub_command
                            # if there are arguments left
                            if len(args) > 0:
                                # take the first args (the next sub_command)
                                possible_sub_command = args[0]
                            else:
                                last_function = True
                        else:
                            last_function = True
                    else:
                        last_function = True

                # if the sub_command is a reference to another associated sub_command
                if 'ref' in com:
                    com = prev_command['sub_commands'][com['ref']]

                # if we have a function field just fetch the context and the function name,
                # bind them and call the function passing the arguments
                if 'function' in com:
                    if 'args' in com['function']:
                        args_check, args_error = utils.check_args(
                            com['function']['args'], args)
                        if args_check is False:
                            raise Exception(args_error)

                    context = self.context_map[com["function"]["context"]]
                    funct = com["function"]["f"]
                    call_method = getattr(context, funct)
                    # we pass the command name (could be useful for the called function)
                    # and possible arguments to the function
                    call_method(command, *args)
                else:
                    # if we have no method implementation of the command
                    # print the help of the command
                    # passing all the arguments list to help function (including the command) in a unique array
                    self.exec_command('help', complete_command_array)

            else:
                print("command '" + command + "' not found")
        except Exception as e:
            if isinstance(e, UcError):
                print(utils.titlify('uc error'))
                print(str(e))
            else:
                print(utils.error_format('exec_command', str(e)))
                self.exec_command('help', complete_command_array)
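
The key lookups in exec_command imply the shape of commands_map: an entry may carry 'ref' (an alias to another command), nested 'sub_commands', and a 'function' record with 'context', 'f' and an optional 'args' list. An illustrative map; the structure is inferred from the code above, the command and method names are hypothetical:

commands_map = {
    'q': {'ref': 'quit'},                      # alias resolved via 'ref'
    'quit': {
        'function': {'context': 'core', 'f': 'do_quit'}
    },
    'memory': {
        'sub_commands': {
            'read': {
                'function': {
                    'context': 'memory',       # key into self.context_map
                    'f': 'read',               # method fetched with getattr
                    'args': ['address', 'length']
                }
            }
        }
    }
}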
Example #8
import argparse


def parse_args():
    desc = "Pytorch implementation of GAN collections"
    parser = argparse.ArgumentParser(description=desc)

    parser.add_argument('--gan_type',
                        type=str,
                        default='GAN',
                        choices=[
                            'GAN', 'Classifier', 'CGAN', 'BEGAN', 'WGAN',
                            'VAE', "CVAE", "WGAN_GP"
                        ],
                        help='The type of GAN')  # , required=True)
    parser.add_argument('--dataset',
                        type=str,
                        default='mnist',
                        choices=['mnist', 'fashion-mnist'],
                        help='The name of dataset')
    parser.add_argument('--conditional', type=bool, default=False)

    parser.add_argument('--dir',
                        type=str,
                        default='./',
                        help='Working directory')
    parser.add_argument('--save_dir',
                        type=str,
                        default='models',
                        help='Directory name to save the model')
    parser.add_argument('--result_dir',
                        type=str,
                        default='results',
                        help='Directory name to save results')
    parser.add_argument('--sample_dir',
                        type=str,
                        default='Samples',
                        help='Directory name to save the generated images')
    parser.add_argument('--log_dir',
                        type=str,
                        default='logs',
                        help='Directory name to save training logs')

    parser.add_argument('--epoch',
                        type=int,
                        default=25,
                        help='The number of epochs to run')
    parser.add_argument('--batch_size',
                        type=int,
                        default=64,
                        help='The size of batch')
    parser.add_argument('--num_examples',
                        type=int,
                        default=50000,
                        help='The number of examples to use for train')
    parser.add_argument('--tau',
                        type=float,
                        default=0.0,
                        help='ratio of training data.')
    parser.add_argument('--size_epoch', type=int, default=1000)
    parser.add_argument('--gpu_mode', type=bool, default=True)
    parser.add_argument('--device', type=int, default=0)

    parser.add_argument('--lrG', type=float, default=0.0002)
    parser.add_argument('--lrD', type=float, default=0.0002)
    parser.add_argument('--lrC', type=float, default=0.01)
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--beta1', type=float, default=0.5)
    parser.add_argument('--beta2', type=float, default=0.999)

    parser.add_argument('--seed', type=int, default=1664)
    parser.add_argument('--classify', type=bool, default=False)
    parser.add_argument('--TrainEval', type=bool, default=False)
    parser.add_argument('--knn', type=bool, default=False)
    parser.add_argument('--IS', type=bool, default=False)
    parser.add_argument('--FID', type=bool, default=False)
    parser.add_argument('--train_G', type=bool, default=False)

    return check_args(parser.parse_args())
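
A caveat on the type=bool arguments above: argparse applies bool() to the raw command-line string, and any non-empty string, including 'False', is truthy, so these flags cannot be switched off from the command line. A short demonstration, with action='store_true' as the usual fix:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--classify', type=bool, default=False)
parser.add_argument('--knn', action='store_true')  # the idiomatic boolean flag

args = parser.parse_args(['--classify', 'False'])
print(args.classify)  # True -- bool('False') is truthy
print(args.knn)       # False -- store_true flags are False unless present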
Example #9
        type=str,
        default='check_points/pretrain_models/albert_large_zh/pytorch_albert_model.pth'
    )
    parser.add_argument('--checkpoint_dir',
                        type=str,
                        default='check_points/DRCD/albert_large_zh/')
    parser.add_argument('--setting_file', type=str, default='setting.txt')
    parser.add_argument('--log_file', type=str, default='log.txt')

    # use some global vars for convenience
    args = parser.parse_args()
    args.checkpoint_dir += ('/epoch{}_batch{}_lr{}_warmup{}_anslen{}/'.format(
        args.train_epochs, args.n_batch, args.lr, args.warmup_rate,
        args.max_ans_length))
    args = utils.check_args(args)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
    device = torch.device("cuda")
    n_gpu = torch.cuda.device_count()
    print("device %s n_gpu %d" % (device, n_gpu))
    print("device: {} n_gpu: {} 16-bits training: {}".format(
        device, n_gpu, args.float16))

    # load the bert setting
    if 'albert' not in args.bert_config_file:
        bert_config = BertConfig.from_json_file(args.bert_config_file)
    else:
        bert_config = ALBertConfig.from_json_file(args.bert_config_file)

    # load data
    print('loading data...')
Example #10
import argparse
import os
import sys
from datetime import datetime as dt
from os.path import join

import numpy as np
import torch
from torch import optim
from torch.utils.tensorboard import SummaryWriter  # assumed writer; the repo may use tensorboardX

# project-specific helpers (SemiVAE, get_all_data, train, check_args, one_hot,
# print_num_params) and globals (alpha, gamma, log_interval, orig_data_shape,
# data_shape) are assumed to be defined elsewhere in the repo


def main():
    # set_rnd_seed(31)   # reproducibility

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--data_dir_train',
                        type=str,
                        default='./data/brats_19/Train',
                        metavar='DATA_TRAIN',
                        help="data train directory")
    parser.add_argument('--data_dir_val',
                        type=str,
                        default='./data/brats_19/Validation',
                        metavar='DATA_VAL',
                        help="data validation directory")
    parser.add_argument('--log_dir',
                        type=str,
                        default='logs/',
                        metavar='LOGS',
                        help="logs directory")
    parser.add_argument('--models_dir',
                        type=str,
                        default='models/',
                        metavar='MODELS',
                        help="models directory")
    parser.add_argument('--batch_size',
                        type=int,
                        default=16,
                        metavar='BATCH',
                        help="batch size")
    parser.add_argument('--learning_rate',
                        type=float,
                        default=2.0e-5,
                        metavar='LR',
                        help="learning rate")
    parser.add_argument('--epochs',
                        type=int,
                        default=1000000,  # an int literal; 1e6 is a float and bypasses type=int
                        metavar='EPOCHS',
                        help="number of epochs")
    parser.add_argument('--zdim',
                        type=int,
                        default=16,
                        metavar='ZDIM',
                        help="Number of dimensions in latent space")
    parser.add_argument('--load',
                        type=str,
                        default='',
                        metavar='LOADDIR',
                        help="time string of previous run to load from")
    parser.add_argument('--binary_input',
                        type=bool,
                        default=False,
                        metavar='BINARYINPUT',
                        help="True=one input channel for each tumor structure")
    parser.add_argument('--use_age',
                        type=bool,
                        default=False,
                        metavar='AGE',
                        help="use age in prediction")
    parser.add_argument('--use_rs',
                        type=bool,
                        default=False,
                        metavar='RESECTIONSTATUS',
                        help="use resection status in prediction")

    args = parser.parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Device in use: {}".format(device))
    torch.set_default_tensor_type(torch.cuda.FloatTensor
                                  if torch.cuda.is_available() else torch.FloatTensor)

    logdir_suffix = '-%s-zdim=%d-beta=5000-alpha=%.5f-lr=%.5f-gamma=%d-batch=%d' % (
        args.data_dir_train.replace("Train", "").replace(".", "").replace("/", ""),
        args.zdim, alpha, args.learning_rate, gamma, args.batch_size)
    if args.use_age:
        logdir_suffix += "-age"
    if args.use_rs:
        logdir_suffix += "-rs"
    if args.binary_input:
        logdir_suffix += "-binary_input"
    if args.load == "":
        date_str = str(dt.now())[:-7].replace(":", "-").replace(" ", "-") + logdir_suffix
    else:
        date_str = args.load
    args.models_dir = join(args.models_dir, date_str)
    args.log_dir = join(args.log_dir, date_str)
    os.makedirs(args.log_dir, exist_ok=True)
    os.makedirs(args.models_dir, exist_ok=True)
    check_args(args)
    writer = SummaryWriter(args.log_dir + '-train')

    ## Get dataset

    data = get_all_data(args.data_dir_train,
                        args.data_dir_val,
                        orig_data_shape,
                        binary_input=args.binary_input)

    x_data_train_labeled, x_data_train_unlabeled, x_data_val, y_data_train_labeled, y_data_val, y_dim = data
    if args.binary_input:
        n_labels = x_data_train_labeled.shape[1]
    else:
        n_labels = len(
            np.bincount(x_data_train_labeled[:10].astype(np.int8).flatten()))
    x_data_train_labeled = x_data_train_labeled.astype(np.int8)
    x_data_train_unlabeled = x_data_train_unlabeled.astype(np.int8)
    x_data_val = x_data_val.astype(np.int8)

    if args.use_age:
        age_std = 12.36
        age_mean = 62.2
        age_l = np.expand_dims(np.load(join(args.data_dir_train, "age_l.npy")),
                               1)
        age_u = np.expand_dims(np.load(join(args.data_dir_train, "age_u.npy")),
                               1)
        age_v = np.expand_dims(np.load(join(args.data_dir_val, "age.npy")), 1)
        age_l = (age_l - age_mean) / age_std
        age_u = (age_u - age_mean) / age_std
        age_v = (age_v - age_mean) / age_std
    else:
        age_l, age_u, age_v = [], [], []

    if args.use_rs:
        rs_l = one_hot(np.load(join(args.data_dir_train, "rs_l.npy")), 2)
        rs_u = one_hot(np.load(join(args.data_dir_train, "rs_u.npy")), 2)
        rs_v = one_hot(np.load(join(args.data_dir_val, "rs.npy")), 2)
    else:
        rs_l, rs_u, rs_v = [], [], []

    if args.use_rs and args.use_age:
        c_l = np.concatenate([age_l, rs_l], axis=1)
        c_u = np.concatenate([age_u, rs_u], axis=1)
        c_v = np.concatenate([age_v, rs_v], axis=1)
        c_dim = c_l.shape[1]
    elif args.use_rs:
        c_l, c_u, c_v = rs_l, rs_u, rs_v
        c_dim = c_l.shape[1]
    elif args.use_age:
        c_l, c_u, c_v = age_l, age_u, age_v
        c_dim = c_l.shape[1]
    else:
        c_l, c_u, c_v = np.array([]), np.array([]), np.array([])
        c_dim = 0

    y_data_val = y_data_val[:len(x_data_val)]
    print('x unlabeled data shape:', x_data_train_unlabeled.shape)
    print('x labeled data shape:', x_data_train_labeled.shape)
    print('x val data shape:', x_data_val.shape)
    assert data_shape == tuple(x_data_val.shape[2:])
    print('input labels: %d' % n_labels)

    model = SemiVAE(args.zdim,
                    y_dim,
                    c_dim,
                    n_labels=n_labels,
                    binary_input=args.binary_input).to(device)
    print_num_params(model)

    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
    start_epoch = 0

    if args.load != "":
        print("Loading model from %s" % args.models_dir)
        nums = [int(i.split("_")[-1]) for i in os.listdir(args.models_dir)]
        start_epoch = max(nums)
        model_path = join(args.models_dir, "model_epoch_%d" % start_epoch)
        checkpoint = torch.load(model_path)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        if 'model_global_step' in checkpoint.keys():
            model.global_step = checkpoint['model_global_step']
        start_epoch = checkpoint['epoch']
        print("Loaded model at epoch %d, total steps: %d" %
              (start_epoch, model.global_step))

    t_start = dt.now()
    for epoch in range(int(start_epoch + 1), int(args.epochs)):
        train(x_data_train_unlabeled, x_data_train_labeled,
              y_data_train_labeled, x_data_val, y_data_val, c_l, c_u, c_v,
              args.batch_size, epoch, model, optimizer, device, log_interval,
              writer, args.log_dir, n_labels)
        if (dt.now() - t_start).total_seconds() > 3600 * 2:
            torch.save(
                {
                    'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'model_global_step': model.global_step,
                }, join(args.models_dir, "model_epoch_%d" % epoch))
            t_start = dt.now()
        sys.stdout.flush()  # need this when redirecting to file
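
one_hot is applied to the resection-status labels without being defined in the snippet. A minimal NumPy sketch consistent with the calls one_hot(labels, 2):

import numpy as np

def one_hot(labels, num_classes):
    # Map an integer label array of shape (N,) to an (N, num_classes) matrix
    labels = np.asarray(labels, dtype=np.int64)
    out = np.zeros((labels.shape[0], num_classes), dtype=np.float32)
    out[np.arange(labels.shape[0]), labels] = 1.0
    return out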
Example #11
                            help="Contains full path to the root directory for symbols.",
                            metavar="path", required=True)
        parser.add_argument("-o", "--out", dest="out",
                            help="write the result into the out file. If omitted, the"
                            " result is written into the stdout",
                            metavar="file")
        parser.add_argument("--android_ndk_home", dest="ndk_home",
                            help="path of the Android NDK home[optional]. If omitted,"
                            " get it from environment by \"ANDROID_NDK_HOME\"",
                            metavar="path")
    except argparse.ArgumentError:
        pass
    if category:
        parser = parser.add_argument_group("memory")
        required = False
    parser.add_argument('-p', "--process", dest="process",
                        help="Process name you want to parse the memory stack",
                        metavar="process_name", required=required)
    parser.add_argument('-a', "--allocation", dest="alloc",
                        help="The allocation memory stack file",
                        metavar="file", required=required)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Analyze the memory stack to check memory leak")
    generate_help(parser, False)
    args = parser.parse_args()
    rs = check_args(args)
    if not rs:
        rs = parse_memory_stack(args)
    exit(rs)
Example #12
import argparse


def generate_help(parser, category):
    required = True  # assumed preamble; this example's opening is missing in the source
    try:
        parser.add_argument('-s', "--symbols", dest="symbols",
                            help="Contains full path to the root directory for symbols.",
                            metavar="path", required=True)
        parser.add_argument("-o", "--out", dest="out",
                            help="write the result into the out file. If omitted,"
                            " the result is written into the stdout",
                            metavar="file")
        parser.add_argument("--android_ndk_home", dest="ndk_home",
                            help="path of the Android NDK home[optional]. If omitted,"
                            " get it from environment by \"ANDROID_NDK_HOME\"",
                            metavar="path")
    except argparse.ArgumentError:
        pass
    if category:
        parser = parser.add_argument_group("crash")
        required = False
    parser.add_argument('-d', "--dump", dest="dump",
                        help="The crash dump. This is an optional parameter."
                             " If omitted, parse_stack will read input data from stdin",
                        metavar="file", required=required)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Analyze the crash stack to print crash functions")
    generate_help(parser, False)
    args = parser.parse_args()
    rs = check_args(args)
    if not rs:
        rs = parse_crash_stack(args)
    exit(rs)
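
The try/except argparse.ArgumentError in both generate_help functions exists because the combined tool in Example #17 registers both help sets on one shared parser, so the common -s/-o/--android_ndk_home options would be added twice; the second registration raises ArgumentError and is deliberately swallowed. A compact demonstration of that behavior:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-s', '--symbols', metavar='path')
try:
    # Registering the same option strings again raises ArgumentError...
    parser.add_argument('-s', '--symbols', metavar='path')
except argparse.ArgumentError:
    pass  # ...so the shared option ends up defined only once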
Example #13
import argparse
import math
import os
import random
from glob import glob

import torch

# project helpers (check_args, SRGANModel, create_dataloader, display_online_results)
# are assumed to come from the surrounding repo


def main():
    #### options
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu_ids', type=str, default='0,1,2,3,4,5,6,7')

    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--dev_ratio', type=float, default=0.01)
    parser.add_argument('--lr_G', type=float, default=1e-4)
    parser.add_argument('--weight_decay_G', type=float, default=0)
    parser.add_argument('--beta1_G', type=float, default=0.9)
    parser.add_argument('--beta2_G', type=float, default=0.99)
    parser.add_argument('--lr_D', type=float, default=1e-4)
    parser.add_argument('--weight_decay_D', type=float, default=0)
    parser.add_argument('--beta1_D', type=float, default=0.9)
    parser.add_argument('--beta2_D', type=float, default=0.99)
    parser.add_argument('--lr_scheme', type=str, default='MultiStepLR')
    parser.add_argument('--niter', type=int, default=100000)
    parser.add_argument('--warmup_iter', type=int, default=-1)
    parser.add_argument('--lr_steps', type=list, default=[50000])
    parser.add_argument('--lr_gamma', type=float, default=0.5)
    parser.add_argument('--pixel_criterion', type=str, default='l1')
    parser.add_argument('--pixel_weight', type=float, default=1e-2)
    parser.add_argument('--feature_criterion', type=str, default='l1')
    parser.add_argument('--feature_weight', type=float, default=1)
    parser.add_argument('--gan_type', type=str, default='ragan')
    parser.add_argument('--gan_weight', type=float, default=5e-3)
    parser.add_argument('--D_update_ratio', type=int, default=1)
    parser.add_argument('--D_init_iters', type=int, default=0)

    parser.add_argument('--print_freq', type=int, default=100)
    parser.add_argument('--val_freq', type=int, default=1000)
    parser.add_argument('--save_freq', type=int, default=10000)
    parser.add_argument('--crop_size', type=float, default=0.85)
    parser.add_argument('--lr_size', type=int, default=128)
    parser.add_argument('--hr_size', type=int, default=512)

    # network G
    parser.add_argument('--which_model_G', type=str, default='RRDBNet')
    parser.add_argument('--G_in_nc', type=int, default=3)
    parser.add_argument('--out_nc', type=int, default=3)
    parser.add_argument('--G_nf', type=int, default=64)
    parser.add_argument('--nb', type=int, default=16)

    # network D
    parser.add_argument('--which_model_D',
                        type=str,
                        default='discriminator_vgg_128')
    parser.add_argument('--D_in_nc', type=int, default=3)
    parser.add_argument('--D_nf', type=int, default=32)

    # data dir
    parser.add_argument('--hr_path',
                        type=list,
                        default=['data/celebahq-512/', 'data/ffhq-512/'])
    parser.add_argument('--lr_path', type=str, default='data/lr-128/')
    parser.add_argument('--checkpoint_dir',
                        type=str,
                        default='check_points/ESRGAN-V1/')
    parser.add_argument('--val_dir', type=str, default='dev_show')
    parser.add_argument('--training_state',
                        type=str,
                        default='check_points/ESRGAN-V1/state/')

    # resume the training
    parser.add_argument('--resume_state', type=str, default=None)
    parser.add_argument('--pretrain_model_G', type=str, default=None)
    parser.add_argument('--pretrain_model_D', type=str, default=None)

    parser.add_argument('--setting_file', type=str, default='setting.txt')
    parser.add_argument('--log_file', type=str, default='log.txt')
    args = check_args(parser.parse_args())
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids

    #### loading resume state if exists
    if args.resume_state is not None:
        # distributed resuming: all load into default GPU
        device_id = torch.cuda.current_device()
        resume_state = torch.load(
            args.resume_state,
            map_location=lambda storage, loc: storage.cuda(device_id))
    else:
        resume_state = None

    # load dataset
    total_img_list = []
    for hr_path in args.hr_path:
        total_img_list.extend(glob(hr_path + '/*'))

    random.shuffle(total_img_list)
    dev_list = total_img_list[:int(len(total_img_list) * args.dev_ratio)]
    train_list = total_img_list[int(len(total_img_list) * args.dev_ratio):]

    train_loader = create_dataloader(args,
                                     train_list,
                                     is_train=True,
                                     n_threads=len(args.gpu_ids.split(',')))
    dev_loader = create_dataloader(args,
                                   dev_list,
                                   is_train=False,
                                   n_threads=len(args.gpu_ids.split(',')))

    #### create model
    model = SRGANModel(args, is_train=True)
    if resume_state is not None:
        model.load()

    #### resume training
    if resume_state is not None:
        print('Resuming training from epoch: {}, iter: {}.'.format(
            resume_state['epoch'], resume_state['iter']))

        start_epoch = resume_state['epoch']
        current_step = resume_state['iter']
        model.resume_training(resume_state)  # handle optimizers and schedulers
    else:
        current_step = 0
        start_epoch = 0

    total_epochs = int(math.ceil(args.niter / len(train_loader)))

    #### training
    print('Start training from epoch: {:d}, iter: {:d}'.format(
        start_epoch, current_step))
    for epoch in range(start_epoch, total_epochs + 1):
        for _, train_data in enumerate(train_loader):
            current_step += 1
            if current_step > args.niter:
                break
            #### update learning rate
            model.update_learning_rate(current_step,
                                       warmup_iter=args.warmup_iter)

            #### training
            model.feed_data(train_data)
            model.optimize_parameters(current_step)

            #### log
            if current_step % args.print_freq == 0:
                logs = model.get_current_log()
                message = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> '.format(
                    epoch, current_step, model.get_current_learning_rate())
                for k, v in logs.items():
                    message += '{:s}: {:.4e} '.format(k, v)
                print(message)

            # validation
            if current_step % args.val_freq == 0:
                show_dir = os.path.join(args.checkpoint_dir, 'show_dir')
                os.makedirs(show_dir, exist_ok=True)
                dev_data = next(iter(dev_loader))

                model.feed_data(dev_data)
                model.test()

                visuals = model.get_current_visuals()
                display_online_results(visuals,
                                       current_step,
                                       show_dir,
                                       show_size=args.hr_size)

            #### save models and training states
            if current_step % args.save_freq == 0:
                print('Saving models and training states.')
                model.save(current_step)
                model.save_training_state(epoch, current_step)

    print('Saving the final model.')
    model.save('latest')
    print('End of training.')
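
Note that --lr_steps and --hr_path above use type=list, which only behaves while the defaults are used: when a value is passed on the command line, list() splits the string into individual characters. A short demonstration, with nargs='+' as the usual fix:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--lr_steps', type=list, default=[50000])
parser.add_argument('--lr_steps_ok', nargs='+', type=int, default=[50000])

args = parser.parse_args(['--lr_steps', '50000'])
print(args.lr_steps)     # ['5', '0', '0', '0', '0'] -- list() over the string
print(args.lr_steps_ok)  # [50000] -- override with '--lr_steps_ok 50000 75000'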
Example #14
import argparse


def parse_args():
    desc = "Pytorch implementation of GAN collections"
    parser = argparse.ArgumentParser(description=desc)

    parser.add_argument('--gan_type', type=str, default='CVAE',
                        choices=['GAN', 'Classifier', 'CGAN', 'BEGAN', 'WGAN',
                                 'WGAN_GP', 'VAE', 'CVAE'],
                        help='The type of GAN')  # , required=True)
    parser.add_argument('--dataset', type=str, default='mnist', choices=['mnist', 'fashion', 'cifar10'],
                        help='The name of dataset')
    parser.add_argument('--conditional', type=bool, default=False)
    parser.add_argument('--upperbound', type=bool, default=False,
                        help='set to True automatically when task_type contains "upperbound"')
    parser.add_argument('--method', type=str, default='Baseline', choices=['Baseline', 'Ewc', 'Ewc_samples',
                                                                           'Generative_Replay', 'Rehearsal'])

    parser.add_argument('--context', type=str, default='Generation',
                        choices=['Classification', 'Generation', 'Not_Incremental'])

    parser.add_argument('--dir', type=str, default='./Archives/', help='Working directory')
    parser.add_argument('--save_dir', type=str, default='models', help='Directory name to save the model')
    parser.add_argument('--result_dir', type=str, default='results', help='Directory name to save results')
    parser.add_argument('--sample_dir', type=str, default='Samples', help='Directory name to save the generated images')
    parser.add_argument('--log_dir', type=str, default='logs', help='Directory name to save training logs')
    parser.add_argument('--data_dir', type=str, default='Data', help='Directory name for data')
    parser.add_argument('--gen_dir', type=str, default='.', help='Directory name for data')

    parser.add_argument('--epochs', type=int, default=1, help='The number of epochs to run')
    parser.add_argument('--epoch_G', type=int, default=1, help='The number of epochs to run')
    parser.add_argument('--epoch_Review', type=int, default=50, help='The number of epochs to run')
    parser.add_argument('--batch_size', type=int, default=64, help='The size of batch')
    parser.add_argument('--size_epoch', type=int, default=1000)
    parser.add_argument('--gpu_mode', type=bool, default=True)
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--verbose', type=bool, default=False)

    parser.add_argument('--lrG', type=float, default=0.0002)
    parser.add_argument('--lrD', type=float, default=0.0002)
    parser.add_argument('--lrC', type=float, default=0.01)
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M', help='SGD momentum (default: 0.5)')
    parser.add_argument('--beta1', type=float, default=0.5)
    parser.add_argument('--beta2', type=float, default=0.999)

    parser.add_argument('--seed', type=int, default=1664)
    parser.add_argument('--eval', type=bool, default=True)
    parser.add_argument('--train_G', type=bool, default=False)
    parser.add_argument('--eval_C', type=bool, default=False)

    ############### UNUSED FLAGS ##########################
    parser.add_argument('--trainEval', type=bool, default=False)
    parser.add_argument('--knn', type=bool, default=False)
    parser.add_argument('--IS', type=bool, default=False)
    parser.add_argument('--FID', type=bool, default=False)
    parser.add_argument('--Fitting_capacity', type=bool, default=False)
    #######################################################

    parser.add_argument('--num_task', type=int, default=10)
    parser.add_argument('--num_classes', type=int, default=10)
    parser.add_argument('--sample_transfer', type=int, default=5000)
    parser.add_argument('--task_type', type=str, default="disjoint",
                        choices=['disjoint', 'permutations', 'upperbound_disjoint'])
    parser.add_argument('--samples_per_task', type=int, default=200)
    parser.add_argument('--lambda_EWC', type=int, default=5)
    parser.add_argument('--nb_samples_reharsal', type=int, default=10)
    parser.add_argument('--regenerate', type=bool, default=False)

    return check_args(parser.parse_args())
Example #15
        mpi_rank = hvd.local_rank()
        assert mpi_size == n_gpu
        training_hooks = [hvd.BroadcastGlobalVariablesHook(0)]
        print_rank0('GPU NUM', n_gpu)
    else:
        hvd = None
        mpi_size = 1
        mpi_rank = 0
        training_hooks = None
        print('GPU NUM', n_gpu)

    args.checkpoint_dir += (
        '/epoch{}_batch{}_lr{}_warmup{}_anslen{}_tf/'.format(
            args.train_epochs, args.n_batch, args.lr, args.warmup_rate,
            args.max_ans_length))
    args = utils.check_args(args, mpi_rank)
    print_rank0('######## generating data ########')

    if mpi_rank == 0:
        tokenizer = BertTokenizer(vocab_file=args.vocab_file,
                                  do_lower_case=True)
        assert args.vocab_size == len(tokenizer.vocab)
        if not os.path.exists(args.train_dir):
            json2features(args.train_file, [
                args.train_dir.replace('_features_', '_examples_'),
                args.train_dir
            ],
                          tokenizer,
                          is_training=True)

        if not os.path.exists(args.dev_dir1) or not os.path.exists(
Example #16
            out_file = os.path.join(output, filename)
            torch.save(model.state_dict(), out_file)

        print("Best BLEU so far : {} (Epoch {})".format(
            best_bleu[0], best_bleu[1]))

        if logging:
            output_file = 'output_{}'.format(epoch)
            output_sentences = os.path.join(output, output_file)
            exh.write_text('\n'.join(generated_sentences), output_sentences)

        model.train(True)
        torch.set_grad_enabled(True)
        print("Epoch finished in {} seconds".format(time.time() - secs))

        if epoch - best_bleu[1] == 3:
            break

        epoch += 1

    if logging:
        scores['BLEU'] = bleus
        output_scores = os.path.join(output, 'scores.json')
        exh.write_json(scores, output_scores)
        print("Scores saved in {}".format(output_scores))


if __name__ == "__main__":
    args = check_args(sys.argv)
    run(args)
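
Unlike the argparse-based examples, this script hands raw sys.argv to check_args. The real helper is not shown; the sketch below is entirely assumed and only illustrates the pattern of validating argv before calling run:

import sys

def check_args(argv):
    # Expect exactly one positional argument, e.g. a path to a config file
    if len(argv) != 2:
        sys.exit('usage: {} CONFIG_FILE'.format(argv[0]))
    return argv[1]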
Example #17

import argparse

import crash_statck  # sic: the module name is spelled this way throughout this example
import memory_stack
import utils

if __name__ == "__main__":

    parser = argparse.ArgumentParser(description="Analyze the android native crash stack or native memory stack")
    memory_stack.generate_help(parser, True)
    crash_statck.generate_help(parser, True)
    args = parser.parse_args()
    rs = utils.check_args(args)
    if rs:
        exit(rs)
    if args.alloc:
        rs = memory_stack.parse_memory_stack(args)
    else:
        rs = crash_statck.parse_crash_stack(args)
    exit(rs)