Code Example #1
File: uploader.py  Project: DanilaOak/uploader
async def get_file(request: web.Request):
    user_id = id_validator(request.match_info['user_id'], 'User')
    file_id = id_validator(request.match_info['file_id'], 'File')

    user_table = get_model_by_name('user')
    user_exists = await request.app['pg'].fetchval(
        select([exists().where(user_table.c.user_id == user_id)]))

    if not user_exists:
        raise web.HTTPNotFound(body=json.dumps({'error': 'User not found'}),
                               content_type='application/json')

    file_table = get_model_by_name('file')
    file_exists = await request.app['pg'].fetchval(
        select([
            exists().where(
                and_(file_table.c.id == file_id,
                     file_table.c.user_id == user_id))
        ]))

    if not file_exists:
        raise web.HTTPNotFound(body=json.dumps({'error': 'File not found'}),
                               content_type='application/json')

    file = await request.app['pg'].fetchrow(
        file_table.select().where(file_table.c.id == file_id))
    file = row_to_dict(file, 'file')

    return web.FileResponse(path=file['path'], status=200)
Code Example #2
File: uploader.py  Project: DanilaOak/uploader
    async def post(self):
        user_id = id_validator(self.request.match_info['user_id'], 'User')

        if self.request.content_type != 'multipart/form-data' or self.request.content_length == 0:
            return web.json_response(data=[])

        user_table = get_model_by_name('user')
        file_table = get_model_by_name('file')
        user_exists = await self.request.app['pg'].fetchval(
            select([exists().where(user_table.c.user_id == user_id)]))

        if not user_exists:
            await self.request.app['pg'].fetchrow(
                user_table.insert().values(**{'user_id': user_id}))

        reader = await self.request.multipart()
        upload_folder = self.request.app['config']['UPLOAD_FOLDER']
        data = []
        while not reader.at_eof():
            image = await reader.next()

            if not image:
                break

            file_name, ext = get_ext(image.filename)
            generated_file_name = '{}.{}'.format(uuid.uuid4(), ext)
            full_path = os.path.abspath(
                os.path.join(upload_folder, generated_file_name))
            size = 0

            with open(full_path, 'wb') as f:
                while True:
                    chunk = await image.read_chunk()
                    if not chunk:
                        break
                    size += len(chunk)
                    f.write(chunk)

            body = {
                'user_id': user_id,
                'name': image.filename,
                'path': full_path,
                'size': size
            }

            file = await self.request.app['pg'].fetchrow(
                file_table.insert().values(**body).returning(
                    literal_column('*')))
            file = row_to_dict(file, 'file')
            data.append(dict(file))

        return web.json_response(data=data)
Code Example #3
File: uploader.py  Project: DanilaOak/uploader
    async def get(self):
        user_id = id_validator(self.request.match_info['user_id'], 'User')

        user_table = get_model_by_name('user')
        user_exists = await self.request.app['pg'].fetchval(
            select([exists().where(user_table.c.user_id == user_id)]))

        if not user_exists:
            raise web.HTTPNotFound(body=json.dumps(
                {'error': f'User with id={user_id} not found'}),
                                   content_type='application/json')

        file_table = get_model_by_name('file')
        files = await self.request.app['pg'].fetch(
            file_table.select().where(file_table.c.user_id == user_id))
        files = many_row_to_dict(files, 'file')

        return web.json_response(data=files)
Code Example #4
    def __init__(self,
                 sketches_x,
                 sketches_y,
                 class_labels,
                 model_id,
                 out_dir,
                 resume=True,
                 gpu_id=0):
        """train a new model with the given data. the data
        is expected to be in a stroke-3 format.

        Args:
            sketches_x (array-like): array of N sketches in stroke-3 format
            sketches_y (array-like): class labels for each of the N sketches
            class_labels (array-like): all class labels
            model_id (string): unique id for the model; it is used to store and restore checkpoints
            out_dir (string): the path where the checkpoints should be stored
            resume (bool, optional): If true, it will resume the latest checkpoint. Defaults to True.
            gpu_id (int, optional): id of the gpu to run the training on. Defaults to 0.
        """
        # create out_dir if not exist
        if not os.path.isdir(out_dir):
            os.mkdir(out_dir)

        # storing the number of classes will be useful for using the embeddings later with different
        # number of classes in a testing data.
        self.n_classes = len(class_labels)
        self.class_labels = class_labels
        utils.gpu.setup_gpu(gpu_id)

        # prepare the dataset
        dataset = self._convert_data(sketches_x, sketches_y, is_training=True)
        Model = models.get_model_by_name(self.SKETCHFORMER_MODEL_NAME)

        # reset all slow metrics to an empty list
        Transformer.slow_metrics = []

        self.model = Model(Model.default_hparams(), dataset, out_dir, model_id)

        if resume:
            print("[run-experiment] restoring checkpoint if it exists")
            self.model.restore_checkpoint_if_exists("latest")

        # continue training
        if dataset.n_samples != 0:
            self.model.train()
Code Example #5
File: main.py  Project: mgorinova/autoreparam
def main(_):

    # tf.logging.set_verbosity(tf.logging.ERROR)
    np.warnings.filterwarnings('ignore')

    util.print('Loading model {} with dataset {}.'.format(
        FLAGS.model, FLAGS.dataset))

    model_config = models.get_model_by_name(FLAGS.model, dataset=FLAGS.dataset)

    if FLAGS.results_dir == '':
        results_dir = FLAGS.model + '_' + FLAGS.dataset
    else:
        results_dir = FLAGS.results_dir

    if not tf.io.gfile.exists(results_dir):
        tf.io.gfile.makedirs(results_dir)

    filename = '{}{}{}{}{}.json'.format(
        FLAGS.method, ('_' + FLAGS.learnable_parameterisation_type
                       if 'VIP' in FLAGS.method else ''),
        ('_tied' if FLAGS.tied_pparams else ''),
        ('_reparam_variational' if 'VIP' in FLAGS.method
         and FLAGS.reparameterise_variational else ''),
        ('_discrete_prior'
         if 'VIP' in FLAGS.method and FLAGS.discrete_prior else ''))

    file_path = os.path.join(results_dir, filename)

    if FLAGS.inference == 'VI':

        run_vi(model_config, results_dir, file_path)

    elif FLAGS.inference == 'HMC':
        if FLAGS.method == 'i':
            run_interleaved_hmc(model_config, results_dir, file_path)
        else:
            run_hmc(model_config, results_dir, file_path, tuning=False)
    elif FLAGS.inference == 'HMCtuning':
        run_hmc(model_config, results_dir, file_path, tuning=True)
Code Example #6
def main():

    parser = argparse.ArgumentParser(
        description='Train modified transformer with sketch data')
    parser.add_argument("model_name",
                        default=None,
                        help="Model that we are going to train")
    parser.add_argument("--id", default="0", help="experiment signature")
    parser.add_argument("--data-loader",
                        default='stroke3-distributed',
                        help="Data loader that will provide data for model")
    parser.add_argument("--dataset", default=None, help="Input data folder")
    parser.add_argument("-o",
                        "--output-dir",
                        default="",
                        help="output directory")
    parser.add_argument('-p',
                        "--hparams",
                        default=None,
                        help="Parameters to override")
    parser.add_argument(
        "-g",
        "--gpu",
        default=0,
        type=int,
        nargs='+',
        help="GPU ID to run on",
    )
    parser.add_argument('--metrics',
                        type=str,
                        nargs='+',
                        help="selection of metrics you want to calculate")
    parser.add_argument("--help-hps",
                        action="store_true",
                        help="Prints out the hparams file")
    parser.add_argument("-r",
                        "--resume",
                        default='latest',
                        help="One of 'latest' or a checkpoint name")
    args = parser.parse_args()

    # get our model and data loader classes
    Model = models.get_model_by_name(args.model_name)
    DataLoader = dataloaders.get_dataloader_by_name(args.data_loader)

    # load the config
    hps = utils.hparams.combine_hparams_into_one(Model.default_hparams(),
                                                 DataLoader.default_hparams())
    utils.hparams.load_config(
        hps, Model.get_config_filepath(args.output_dir, args.id))

    # check for help screams from the void
    if args.help_hps:
        combined_hps = pprint.pformat(hps.values())
        print("\nLoaded parameters from {}: \n{}\n\n".format(
            Model.get_config_filepath(args.output_dir, args.id), combined_hps))
        return

    # optional override of parameters
    if args.hparams:
        hps.parse(args.hparams)

    # build model, load checkpoints
    utils.gpu.setup_gpu(args.gpu)
    dataset = DataLoader(hps, args.dataset)
    model = Model(hps, dataset, args.output_dir, args.id)
    model.restore_checkpoint_if_exists(args.resume)

    # compute and send metrics
    metric_names = args.metrics or []  # guard against --metrics being omitted
    metrics_list = {
        m: metrics.build_metric_by_name(m, hps.values())
        for m in metric_names
    }
    model.compute_metrics_from(metrics_list)
    model.plot_and_send_notification_for(metrics_list)
    model.clean_up_tmp_dir()
Code Example #7
def main():
    # Parsing arguments
    parser = argparse.ArgumentParser(description='Training GANs or VAEs')
    parser.add_argument('--model', type=str, required=True)
    parser.add_argument('--dataset', type=str, required=True)
    parser.add_argument('--epoch', type=int, default=200)
    parser.add_argument('--batchsize', type=int, default=50)
    parser.add_argument('--output', default='output')
    parser.add_argument('--z-dims', type=int, default=256)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--resume', type=str, default=None)
    parser.add_argument('--test-mode', action='store_true')
    parser.add_argument('--is-conditional', action='store_true')
    parser.add_argument('--aux-classifier', action='store_true')
    parser.add_argument('--label-smoothing', default=0.0, type=float)
    parser.add_argument('--input-noise', default=0.0, type=float)
    parser.add_argument('--run-id', '-r', required=True)
    parser.add_argument('--checkpoint-every', default='1.', type=str)
    parser.add_argument('--notify-every', default='1.', type=str)
    parser.add_argument('--lr', default=1e-4, type=float)
    parser.add_argument('--dis-loss-control', default=1., type=float)
    parser.add_argument('--triplet-weight', default=1., type=float)
    parser.add_argument('--embedding-dim', default=256, type=int)
    parser.add_argument('--isolate-d-classifier', action='store_true')
    parser.add_argument(
        '--controlled-losses',
        type=str,
        nargs='+',
        help="strings in format loss_name:weight:control_type:pivot_epoch")
    parser.add_argument('--metrics',
                        type=str,
                        nargs='+',
                        help="selection of metrics you want to calculate")
    parser.add_argument('--wgan-n-critic', default=5, type=int)
    parser.add_argument('--began-gamma', default=0.5, type=float)

    args = parser.parse_args()

    # select gpu and limit resources if applicable
    if 'tensorflow' == K.backend():
        import tensorflow as tf
        from keras.backend.tensorflow_backend import set_session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = str(args.gpu)
        set_session(tf.Session(config=config))

    # make output directory if not exists
    if not os.path.isdir(args.output):
        os.mkdir(args.output)

    # load datasets
    dataset = load_dataset(args.dataset)

    model = models.get_model_by_name(args.model)(input_shape=dataset.shape[1:],
                                                 **vars(args))

    if args.resume:
        model.load_model(args.resume)

    model.main_loop(dataset, epochs=args.epoch, batchsize=args.batchsize)
Code Example #8
def main():

    parser = argparse.ArgumentParser(
        description='Train modified transformer with sketch data')
    parser.add_argument("model_name",
                        default=None,
                        help="Model that we are going to train")

    parser.add_argument("--id", default="0", help="experiment signature")
    parser.add_argument("--dataset", default=None, help="Input data folder")
    parser.add_argument("-o",
                        "--output-dir",
                        default="",
                        help="Output directory")

    parser.add_argument("-p",
                        "--hparams",
                        default=None,
                        help="Parameters that are specific to one model. They "
                        "can regard hyperparameters such as the number of layers "
                        "or specifics of training such as an optimiser choice")
    parser.add_argument("--base-hparams",
                        default=None,
                        help="Model parameters that concern all models. "
                        "Those are related to logging, checkpointing, "
                        "notifications and loops")
    parser.add_argument("--data-hparams",
                        default=None,
                        help="Dataset-related parameters. Regards data "
                        "formats and preprocessing parameters")

    parser.add_argument(
        "-g",
        "--gpu",
        default=0,
        type=int,
        nargs='+',
        help="GPU ID to run on",
    )
    parser.add_argument("-r",
                        "--resume",
                        default=None,
                        help="One of 'latest' or a checkpoint name")
    parser.add_argument("--data-loader",
                        default='stroke3-distributed',
                        help="Data loader that will provide data for model")
    parser.add_argument("--help-hps",
                        action="store_true",
                        help="Prints out each hparams default values")
    args = parser.parse_args()

    # get our model and data loader classes
    Model = models.get_model_by_name(args.model_name)
    DataLoader = dataloaders.get_dataloader_by_name(args.data_loader)

    # check for desperate help calls from the unending darkness
    if args.help_hps:
        base_help = pprint.pformat(Model.base_default_hparams().values())
        specific_help = pprint.pformat(
            Model.specific_default_hparams().values())
        data_help = pprint.pformat(DataLoader.default_hparams().values())
        print("\nBase model default parameters: \n{}\n\n"
              "{} default parameters: \n{}\n\n"
              "{} data loader default parameters: \n{}".format(
                  base_help, args.model_name, specific_help, args.data_loader,
                  data_help))
        return

    # parse the parameters, updating defaults
    model_hps = Model.parse_hparams(base=args.base_hparams,
                                    specific=args.hparams)
    data_hps = DataLoader.parse_hparams(args.data_hparams)

    # build model, load checkpoints
    utils.gpu.setup_gpu(args.gpu)
    dataset = DataLoader(data_hps, args.dataset)
    model = Model(model_hps, dataset, args.output_dir, args.id)
    if args.resume is not None:
        model.restore_checkpoint_if_exists(args.resume)

    # combine and save config file
    combined_hps = utils.hparams.combine_hparams_into_one(model_hps, data_hps)
    utils.hparams.save_config(model.config_filepath, combined_hps)

    # train
    model.train()
Code Example #9
def main():

    parser = argparse.ArgumentParser(
        description='Train modified transformer with sketch data')
    parser.add_argument("experiment_name", default=None,
                        help="Reference name of experiment that you want to run")
    parser.add_argument("--id", default="0", help="Experiment signature")

    parser.add_argument("-o", "--output-dir", default="", help="output directory")
    parser.add_argument("--exp-hparams", default=None,
                        help="Parameters to override defaults for experiment")
    parser.add_argument("--model-hparams", default=None,
                        help="Parameters to override defaults for model")
    parser.add_argument("-g", "--gpu", default=0, type=int, nargs='+', help="GPU ID to run on", )

    parser.add_argument("--model-name", default=None,
                        help="Model that you want to experiment on")
    parser.add_argument("--model-id", default=None,
                        help="Id of the model that you want to experiment on")

    parser.add_argument("--data-loader", default='stroke3-distributed',
                        help="Data loader that will provide data for model, "
                        "if you want to load a model")
    parser.add_argument("--dataset", default=None,
                        help="Input data folder if you want to load a model")
    parser.add_argument("-r", "--resume", default='latest', help="One of 'latest' or a checkpoint name")
    parser.add_argument("--help-hps", action="store_true",
                        help="Prints out the hparams default values")
    args = parser.parse_args()

    Experiment = experiments.get_experiment_by_name(args.experiment_name)

    # check for lost users in the well of despair
    if args.help_hps:
        hps_description = pprint.pformat(Experiment.default_hparams().values())
        print("\nDefault params for experiment {}: \n{}\n\n".format(
            args.experiment_name, hps_description))
        return

    # load model if that is what the experiment requires
    utils.gpu.setup_gpu(args.gpu)
    if Experiment.requires_model:
        Model = models.get_model_by_name(args.model_name)
        DataLoader = dataloaders.get_dataloader_by_name(args.data_loader)

        # load the model config
        model_hps = utils.hparams.combine_hparams_into_one(
            Model.default_hparams(), DataLoader.default_hparams())
        utils.hparams.load_config(
            model_hps, Model.get_config_filepath(args.output_dir, args.model_id))

        # optional override of parameters
        if args.model_hparams:
            model_hps.parse(args.model_hparams)

        dataset = DataLoader(model_hps, args.dataset)
        model = Model(model_hps, dataset, args.output_dir, args.model_id)
        model.restore_checkpoint_if_exists(args.resume)
    else:
        dataset, model = None, None

    experiment_hps = Experiment.parse_hparams(args.exp_hparams)

    # finally, run the experiment
    experiment = Experiment(experiment_hps, args.id, args.output_dir)
    experiment.compute(model)
Code Example #10
def compute_score(model_name, image):
    "Accepts a model name and a PIL image and returns an integer from 0 to 4"
    model_class = get_model_by_name(model_name)
    model = model_class()
    score = model.predict(image)
    return score
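
Note: every snippet on this page resolves a model class from a string name through some project-specific get_model_by_name helper. As a point of reference only, the sketch below shows one common way such a registry can be implemented. It is a hypothetical illustration assuming a plain dict-based registry; the names MODEL_REGISTRY, register_model, and DummyModel are illustrative assumptions, not code from any of the projects above.

# Hypothetical sketch of a name-based model registry (not from any project above).
MODEL_REGISTRY = {}


def register_model(name):
    """Class decorator that records a model class under a string name."""
    def decorator(cls):
        MODEL_REGISTRY[name.lower()] = cls
        return cls
    return decorator


def get_model_by_name(name):
    """Look up a registered model class by its (case-insensitive) name."""
    try:
        return MODEL_REGISTRY[name.lower()]
    except KeyError:
        raise ValueError("Unknown model '{}'. Available: {}".format(
            name, sorted(MODEL_REGISTRY)))


@register_model('dummy')
class DummyModel:
    def predict(self, image):
        # Placeholder scoring logic for this illustration.
        return 0


# Usage mirroring compute_score in Code Example #10:
# model_class = get_model_by_name('dummy')
# model = model_class()
# score = model.predict(image)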
Code Example #11
def main():
    # Parsing arguments
    parser = argparse.ArgumentParser(description='Training GANs or VAEs')
    parser.add_argument('--model', type=str, required=True)
    parser.add_argument('--dataset', type=str, required=True)
    parser.add_argument('--epoch', type=int, default=200)
    parser.add_argument('--batchsize', type=int, default=50)
    parser.add_argument('--output', default='output')
    parser.add_argument('--z-dims', type=int, default=256)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--resume', type=str, default=None)
    parser.add_argument('--test-mode', action='store_true')
    parser.add_argument('--is-conditional', action='store_true')
    parser.add_argument('--aux-classifier', action='store_true')
    parser.add_argument('--share-decoder-and-generator', action='store_true')
    parser.add_argument('--label-smoothing', default=0.0, type=float)
    parser.add_argument('--input-noise', default=0.0, type=float)
    parser.add_argument('--run-id', '-r', required=True)
    parser.add_argument('--checkpoint-every', default='1.', type=str)
    parser.add_argument('--notify-every', default='1.', type=str)
    parser.add_argument('--send-every', default=1, type=int)
    parser.add_argument('--lr', default=1e-4, type=float)
    parser.add_argument('--dis-loss-control', default=1., type=float)
    parser.add_argument('--triplet-weight', default=1., type=float)
    parser.add_argument('--embedding-dim', default=256, type=int)
    parser.add_argument('--isolate-d-classifier', action='store_true')
    parser.add_argument(
        '--controlled-losses',
        type=str,
        nargs='+',
        help="strings in format loss_name:weight:control_type:pivot_epoch")
    parser.add_argument('--metrics',
                        type=str,
                        nargs='+',
                        help="selection of metrics you want to calculate")
    parser.add_argument('--wgan-n-critic', default=5, type=int)
    parser.add_argument('--began-gamma', default=0.5, type=float)
    parser.add_argument('--triplet-margin', default=1., type=float)
    parser.add_argument('--n-filters-factor', default=32, type=int)
    parser.add_argument('--use-began-equilibrium', action='store_true')
    parser.add_argument('--use-alignment-layer', action='store_true')
    parser.add_argument('--use-simplified-triplet', action='store_true')
    parser.add_argument('--data-folder', default='datasets/files')
    parser.add_argument('--use-magan-equilibrium', action='store_true')
    parser.add_argument('--topgan-enforce-std-dev', action='store_true')
    parser.add_argument('--topgan-use-data-trilet-regularization',
                        action='store_true')
    parser.add_argument('--use-began-loss', action='store_true')
    parser.add_argument('--use-gradnorm', action='store_true')
    parser.add_argument('--use-sigmoid-triplet', action='store_true')
    parser.add_argument('--online-mining', default=None, type=str)
    parser.add_argument('--online-mining-ratio', default=4, type=int)
    parser.add_argument('--gradnorm-alpha', default=0.5, type=float)
    parser.add_argument('--distance-metric', default='l2', type=str)
    parser.add_argument('--slack-channel', type=str, default="random")
    parser.add_argument('--use-quadruplet', action='store_true')
    parser.add_argument('--generator-mining', action='store_true')

    args = parser.parse_args()

    # select gpu and limit resources if applicable
    if 'tensorflow' == K.backend():
        import tensorflow as tf
        from keras.backend.tensorflow_backend import set_session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = str(args.gpu)
        set_session(tf.Session(config=config))

    # make output directory if not exists
    if not os.path.isdir(args.output):
        os.mkdir(args.output)

    # load datasets
    dataset = load_dataset(args.dataset)

    model = models.get_model_by_name(args.model)(input_shape=dataset.shape[1:],
                                                 **vars(args))

    if args.resume:
        model.load_model(args.resume)

    get_inference_via_optimization(model, dataset)
Code Example #12
    dataset_name = "mnist"
    model_name = AVAILABLE_MODELS[args.model]
    loss_fn = AVAILABLE_LOSS_FNS[args.loss]
    batch_size = config["batch_size"][dataset_name.lower()]
    train_subset_size = config["train_subset_size"][model_name.lower()]
    epochs = args.epochs if args.epochs else config["epochs"]
    previous_model = None
    if args.prev:
        if loss_fn not in args.prev:
            raise RuntimeError("Use same loss function")
        if not os.path.exists(args.prev):
            raise FileNotFoundError("Couldn't find {}".format(args.prev))
        previous_model = torch.load(args.prev)
    num_params = config["num_params"][model_name]
    num_models = None
    if args.num:
        num_models = args.num
    model = get_model_by_name(model_name)
    dataset = get_dataset(dataset_name, train_subset_size, batch_size)

    training = Training(model,
                        dataset,
                        num_params,
                        epochs,
                        prev_model=previous_model,
                        num_models=num_models,
                        loss_fn=loss_fn)
    training.start()
    training.save()
Code Example #13
ITERATIONS = 1
TEMPERATURE = 0

ENCODER_NAME = "oneplane"
AGENT_NAME = "policy_agent"
MODEL_NAME = "policy_gradient_model"

AGENT_FILE_NAME = "q_learning_less_games_1"
LOAD_AGENT = False
SAVE_AGENT = True

CUR_WINS = 0

encoder = encoders.get_encoder_by_name(ENCODER_NAME, ROWS, COLS, MINES)

model = models.get_model_by_name(MODEL_NAME, encoder)

if LOAD_AGENT:
    with h5py.File(AGENT_FILE_NAME, 'r') as prev_agent:
        agent = agents.load_agent_by_name(AGENT_NAME, prev_agent)
else:
    agent = agents.get_agent_by_name(AGENT_NAME, model, encoder)

if TEMPERATURE:
    agent.set_temperature(TEMPERATURE)

while True:
    print("Creating 100 games with updated agent")
    create_experience(agent, model, ROWS, COLS, MINES, 1, 100)

    for file_name in os.listdir("experience"):