Example No. 1
    def get_template_values(self):
        subscriptions = get_config().get('subscriptions')
        feeds = Feed.all().order('-date')
        template_values = {
            'feeds': feeds,
            'subscriptions': subscriptions
        }

        return template_values
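The handler above assumes a module-level get_config() that returns a dict-like object with a 'subscriptions' key. A minimal sketch of such a loader, where the YAML file path and the caching behavior are assumptions rather than the original implementation:

import yaml

_config = None

def get_config(path='config.yaml'):
    """Load and cache the application config (hypothetical implementation)."""
    global _config
    if _config is None:
        with open(path) as f:
            _config = yaml.safe_load(f)  # e.g. {'subscriptions': [...]}
    return _config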
Example No. 2
    elif model.inner_scale != 1 and model.inner_scale % 2 != 0:
        raise ValueError('inner_scale must be 1 or an even number.')
    if args.finetune is not None:
        chainer.serializers.load_npz(args.finetune, model)

    if args.gpu >= 0:
        chainer.backends.cuda.check_cuda_available()
        chainer.backends.cuda.get_device(args.gpu).use()
        weight = chainer.backends.cuda.to_gpu(weight)
        model.to_gpu()

    optimizer = optimizers.Adam(alpha=args.learning_rate)
    optimizer.setup(model)
    print('done')

    valid_config = utils.get_config(args, model, train=False)
    train_config = utils.get_config(args, model, train=True)

    print('* check forward path...', end=' ')
    di = train_config.in_size
    do = train_config.out_size
    dx = model.xp.zeros((args.batch_size, 3, di, di), dtype=np.float32)
    dy = model(dx)
    if dy.shape[2:] != (do, do):
        raise ValueError('Invalid output size\n'
                         'Expect: ({}, {})\n'
                         'Actual: {}'.format(do, do, dy.shape[2:]))
    print('done')

    print('* starting processes of dataset sampler...', end=' ')
    valid_queue = DatasetSampler(valid_list, valid_config)
Example No. 3
        params = {
            "learning_rate": [0.000001 * x for x in range(1000)],
            "epochs": [250],
            "batch_size": [32, 64, 128, 256, 512],
            "dropout": [0.01 * x for x in range(50)],
            "layers": [
                random.sample([64, 128, 256, 512, 1024], random.randint(1, 4))
                for _ in range(1000)
            ]
        }

        print("Hyperparameter search")

        for _ in range(20000):
            sampled_params = get_config(params)
            print("Configuration:")
            print(sampled_params)

            model, history = lib.mlp.fit_model(
                embeddings,
                train_scores,
                batch_size=sampled_params['batch_size'],
                epochs=sampled_params['epochs'],
                learning_rate=sampled_params['learning_rate'],
                x_val=val_embeddings,
                y_val=val_scores,
                name='mlp_model_best',
                layers=sampled_params["layers"],
                dropout=sampled_params["dropout"])
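In this example, get_config(params) evidently draws one candidate per hyperparameter from the search space defined above. A minimal sketch of such a random-search sampler (an assumption, not the original code):

import random

def get_config(params):
    # Hypothetical sampler: pick one candidate value uniformly at
    # random for each hyperparameter in the grid.
    return {name: random.choice(values) for name, values in params.items()}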
Example No. 4
"""
Description : Initialize all plugins in PLUGIN_FOLDER

Note : this is notes

"""
#===============================================================================
# IMPORT
#===============================================================================
import pymel.core as pm
import os.path, logging
import lib.utils as utils

#===============================================================================
# CONFIG VARIABLES
#===============================================================================
config = utils.get_config()
LOG_LEVEL = config["LOGGING_LEVEL"]
PLUGIN_PATH = config["PIXO_RIGGING_PATHS"]['PLUGIN_PATH']

#===============================================================================
# LOGGER
#===============================================================================
logger = logging.getLogger(__name__)
logger.setLevel(config["LOGGING_LEVEL"])

#===============================================================================
# FUNCTIONS
#===============================================================================
def main(*args):

    plugins = [f for f in os.listdir(PLUGIN_PATH)
               if not f.startswith("_") and f.endswith(".py")]
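This snippet, like the others, leans on a project-specific utils.get_config(). A minimal JSON-backed sketch compatible with the keys used above; the path and file format are assumptions:

import json
import os

def get_config(path=os.path.join(os.path.dirname(__file__), "config.json")):
    # Hypothetical loader; the real pipeline may read YAML, env vars, etc.
    # Expected keys include "LOGGING_LEVEL" and "PIXO_RIGGING_PATHS".
    with open(path) as f:
        return json.load(f)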
Example No. 5
def generate_with_predefined_sequences(opts,
                                       TR,
                                       sched_group,
                                       group='experimental'):
    """
    Generate schedule using sequences already defined. 
    """
    # get config
    config = get_config()
    type_data = get_seq_types(opts.type_file)

    seq_file = opts.seq_file + ".json" if opts.seq_file \
        else "./scheduling/sequences.json"
    # alternative: opts.seq_file + "_{}.json".format(sched_group)
    color_list = config["COLOR_LIST"]

    # create sequences
    row_list = []
    sess_num = 0  # 0 is baseline session

    np.random.seed(config["RND_SEED"] + 1000 * sched_group)

    for index, row in type_data.iterrows():

        (seq_type, seq_length, max_chord_size, seq_keys, n_free_trials,
         n_paced_trials, n_free_trials_testing, n_paced_trials_testing,
         blocks, n_seqs_trained, n_seqs_untrained, n_seqs_fmri,
         n_sess, testing_sessions, n_runs) = row
        testing_session_list = [int(x)
                                for x in str(testing_sessions).split(",")]

        seq_keys = seq_keys.split(" ")
        blocks = [int(x) for x in blocks.split(",")]

        mygenerator = Generator(set=seq_keys,
                                size=seq_length,
                                maxchordsize=max_chord_size)
        trained_seqs, untrained_seqs \
        = mygenerator.read_grouped(seq_file, seq_type)

        if opts.cycle_offset:
            trained_seqs = trained_seqs[
                opts.cycle_offset:] + trained_seqs[:opts.cycle_offset]
            untrained_seqs = untrained_seqs[
                opts.cycle_offset:] + untrained_seqs[:opts.cycle_offset]

        n_trained = len(trained_seqs)
        n_untrained = len(untrained_seqs)
        reorder_trained = list(permutations(range(n_trained)))
        reorder_trained_fmri = list(combinations(range(n_trained),
                                                 n_seqs_fmri))
        #        reorder_untrained = list(combinations(range(n_untrained), n_seqs_untrained)) if not opts.no_untrained else []
        reorder_untrained = []

        untrained_list = range(n_untrained)
        #one = untrained_list[0]
        #twos = untrained_list[1:3]
        #rest = untrained_list[3:]
        untrained_groups = []
        for j in range(n_seqs_untrained):
            untrained_groups.append(untrained_list[j::n_seqs_untrained])

        for k in range(len(testing_session_list)):
            #            mycombination = [one, twos[k % 2], rest[k % len(rest)]]
            mycombination = [x[k % len(x)] for x in untrained_groups]
            random.shuffle(mycombination)
            reorder_untrained.append(tuple(mycombination))

        # n_seqs: how many are presented
        # get colors
        seq_color = {}
        for myseq in trained_seqs:
            index = random.randint(0, len(color_list) - 1)
            seq_color[myseq[1]] = color_list[index]
            del color_list[index]

        for myseq in untrained_seqs:
            index = random.randint(0, len(color_list) - 1)
            seq_color[myseq[1]] = color_list[index]
            del color_list[index]


#        untrained_index = 0
        trained_comb_num = 0
        untrained_comb_num = 0

        for sess in range(n_sess):

            # controls the order across sessions
            trained_combination = list(
                reorder_trained[trained_comb_num % len(reorder_trained)])
            trained_fmri_combination = list(
                reorder_trained_fmri[trained_comb_num %
                                     len(reorder_trained_fmri)])
            trained_comb_num += 1

            for paced in range(2):

                myruns = n_runs if paced and \
                sess_num in testing_session_list else 1 # sess+1

                if sess_num not in testing_session_list:  # training sess + 1
                    sess_type = "training"
                    n_trials = n_free_trials if paced == 0 else \
                    n_paced_trials

                    for seq in range(n_seqs_trained):
                        instruct = 1 if seq == 0 else 0
                        seq_index = trained_combination[seq]
                        seq_train = "trained"
                        sequence, sequence_string = \
                        trained_seqs[seq_index]

                        if n_trials > 0 and group == 'experimental':
                            row_list.append([
                                sess_num,
                                sess_type,
                                n_trials,
                                " ".join(seq_keys),
                                seq_type,
                                sequence_string,
                                seq_train,
                                seq_color[sequence_string],
                                trained_combination,
                                seq_index,
                                paced,
                                instruct,
                                1,  #run
                                1  # block
                            ])

                else:  # testing / fmri
                    untrained_combination = \
                        list(reorder_untrained[untrained_comb_num %
                                               len(reorder_untrained)]) \
                        if not opts.no_untrained else []
                    #                    print(untrained_combination)
                    #                    print(reorder_untrained)

                    if paced == 0:
                        sess_type = "testing"
                        n_trials = n_free_trials_testing

                        for seq in range(
                                n_seqs_trained +
                                n_seqs_untrained):  # trained and untrained
                            instruct = 1 if seq == 0 else 0

                            # interleave trained/untrained
                            if seq % 2 == 1 and not opts.no_untrained:
                                seq_index = untrained_combination[(seq - 1) // 2]
                                shuffled_combination = untrained_combination
                                seq_train = "untrained"
                                sequence, sequence_string = \
                                untrained_seqs[seq_index]

                            else:
                                seq_index = trained_combination[seq // 2]
                                shuffled_combination = trained_combination
                                seq_train = "trained"
                                sequence, sequence_string = \
                                trained_seqs[seq_index]

                            if n_trials > 0:
                                row_list.append([
                                    sess_num,
                                    sess_type,
                                    n_trials,
                                    " ".join(seq_keys),
                                    seq_type,
                                    sequence_string,
                                    seq_train,
                                    seq_color[sequence_string],
                                    shuffled_combination,
                                    seq_index,
                                    paced,
                                    instruct,
                                    1,  #run
                                    1  # block
                                ])

                    else:
                        untrained_comb_num = untrained_comb_num + 1

                        sess_type = "fmri"

                        combination_index = trained_fmri_combination + \
                        untrained_combination
                        # same number of trained and untrained sequences
                        combination_type = \
                        len(trained_fmri_combination) * ["trained"] + \
                        len(trained_fmri_combination) * ["untrained"]
                        # materialize as a list so len() works in Python 3
                        combination = list(zip(combination_type,
                                               combination_index))
                        print(combination)
                        n_trials = np.sum(np.array(blocks))
                        # compute run statistics
                        nbeats = config["MAX_CHORD_SIZE"] + \
                        config["EXTRA_BEATS"]

                        ITI = list(
                            generate_ITIs(config["ITIMEAN_FMRI"],
                                          config["ITIRANGE_FMRI"], 'exp'))
                        trial_duration = config["BEAT_INTERVAL"]*nbeats + \
                        config["BUFFER_TIME"] + config["FIXATION_TIME"] + \
                        np.mean(ITI) #config["ITIMEAN_FMRI"]
                        run_duration = trial_duration*n_trials*\
                        (len(combination)) + config["START_TIME_FMRI"] + \
                        (len(combination)*n_trials/config["STRETCH_TRIALS"]-1)*config["STRETCH_TIME"]

                        total_duration = run_duration * n_runs
                        total_trials = n_runs * n_trials

                        print("Trial duration: %.2f s; " % (trial_duration) +
                              "Run duration: %.2f s (%.2f m, %d frames); " %
                              (run_duration, run_duration / 60,
                               np.ceil(run_duration / TR)) +
                              "Total duration: %.2f m; " %
                              (total_duration / 60) +
                              "Total trials per sequence: %d" % (total_trials))

                        for run in range(myruns):

                            shuffled_combination_run = \
                            shuffle_order(combination)
                            last_seq = 0
                            for block, n_group in enumerate(blocks):
                                shuffled_combination = \
                                shuffle_order(shuffled_combination_run)
                                # avoid repetitions
                                while last_seq == shuffled_combination[0]:

                                    shuffled_combination = \
                                    shuffle_order(shuffled_combination_run)

                                last_seq = shuffled_combination[-1]

                                # shuffle trained and untrained
                                for seq in range(len(shuffled_combination)):
                                    instruct = 1 if seq == 0 and \
                                    block == 0 else 0

                                    combination_type, combination_index = \
                                    shuffled_combination[seq]
                                    if combination_type == "untrained":
                                        seq_train = "untrained"
                                        sequence, sequence_string = \
                                        untrained_seqs[combination_index]

                                    else:
                                        seq_train = "trained"
                                        sequence, sequence_string = \
                                        trained_seqs[combination_index]

                                    if n_trials > 0:
                                        row_list.append([
                                            sess_num,
                                            sess_type,
                                            n_group,
                                            " ".join(seq_keys),
                                            seq_type,
                                            sequence_string,
                                            seq_train,
                                            seq_color[sequence_string],
                                            shuffled_combination,
                                            combination_index,
                                            paced,
                                            instruct,
                                            run + 1,  #run
                                            block + 1  # block
                                        ])

            sess_num = sess_num + 1

    schedule = pd.DataFrame(
        row_list,
        columns=("sess_num", "sess_type", "n_trials", "seq_keys", "seq_type",
                 "sequence_string", "seq_train", "seq_color", "combination",
                 "seq_order", "paced", "instruct", "run", "block"))

    #    schedule.loc[schedule["sess_num"] == 0, "sess_num"] = \
    #        np.max(schedule["sess_num"]) + 1
    #    schedule.sort_values(by = ["sess_num", "paced", "seq_train"],
    #                         inplace = True)

    if opts.schedule_file:
        schedulefilename = opts.schedule_file + "_s{}".format(sched_group)
    else:
        schedulefilename = "./scheduling/schedule{}".format(sched_group)

    if opts.split:
        schedule_home = schedule.loc[schedule["sess_type"] != "fmri", :]
        schedule_fmri = schedule.loc[schedule["sess_type"] == "fmri", :]

        schedule_home.to_csv(schedulefilename + ".csv", sep=";", index=False)
        schedule_fmri.to_csv(schedulefilename + "_fmri.csv",
                             sep=";",
                             index=False)
    else:
        schedule.to_csv(schedulefilename + ".csv", sep=";", index=False)
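Example No. 5 also calls a shuffle_order helper when building the fMRI runs. A plausible minimal version, shown here as an assumption rather than the original code:

import random

def shuffle_order(items):
    # Hypothetical helper: return a randomly ordered copy of the
    # (type, index) pairs without mutating the caller's list.
    shuffled = list(items)
    random.shuffle(shuffled)
    return shuffled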
Example No. 6
def main():
    p = argparse.ArgumentParser(description='Chainer implementation of waifu2x')
    p.add_argument('--gpu', '-g', type=int, default=-1)
    p.add_argument('--seed', '-s', type=int, default=11)
    p.add_argument('--dataset_dir', '-d', required=True)
    p.add_argument('--validation_rate', type=float, default=0.05)
    p.add_argument('--nr_rate', type=float, default=0.65)
    p.add_argument('--chroma_subsampling_rate', type=float, default=0.5)
    p.add_argument('--reduce_memory_usage', action='store_true')
    p.add_argument('--out_size', type=int, default=64)
    p.add_argument('--max_size', type=int, default=256)
    p.add_argument('--active_cropping_rate', type=float, default=0.5)
    p.add_argument('--active_cropping_tries', type=int, default=10)
    p.add_argument('--random_half_rate', type=float, default=0.0)
    p.add_argument('--random_color_noise_rate', type=float, default=0.0)
    p.add_argument('--random_unsharp_mask_rate', type=float, default=0.0)
    p.add_argument('--learning_rate', type=float, default=0.00025)
    p.add_argument('--lr_min', type=float, default=0.00001)
    p.add_argument('--lr_decay', type=float, default=0.9)
    p.add_argument('--lr_decay_interval', type=int, default=5)
    p.add_argument('--batch_size', '-b', type=int, default=16)
    p.add_argument('--patches', '-p', type=int, default=64)
    p.add_argument('--validation_crop_rate', type=float, default=0.5)
    p.add_argument('--downsampling_filters', nargs='+', default=['box'])
    p.add_argument('--resize_blur_min', type=float, default=0.95)
    p.add_argument('--resize_blur_max', type=float, default=1.05)
    p.add_argument('--epoch', '-e', type=int, default=50)
    p.add_argument('--inner_epoch', type=int, default=4)
    p.add_argument('--finetune', '-f', default=None)
    p.add_argument('--model_name', default=None)
    p.add_argument('--color', '-c', default='rgb',
                   choices=['y', 'rgb'])
    p.add_argument('--arch', '-a', default='VGG7',
                   choices=['VGG7', '0', 'UpConv7', '1', 'ResNet10', '2', 'UpResNet10', '3'])
    p.add_argument('--method', '-m', default='scale',
                   choices=['noise', 'scale', 'noise_scale'],)
    p.add_argument('--noise_level', '-n', type=int, default=1,
                   choices=[0, 1, 2, 3])

    args = p.parse_args()
    if args.arch in srcnn.table:
        args.arch = srcnn.table[args.arch]

    utils.set_random_seed(args.seed, args.gpu)
    if args.color == 'y':
        ch = 1
        weight = (1.0,)
    elif args.color == 'rgb':
        ch = 3
        weight = (0.29891 * 3, 0.58661 * 3, 0.11448 * 3)
    weight = np.array(weight, dtype=np.float32)
    weight = weight[:, np.newaxis, np.newaxis]

    print('* loading filelist...', end=' ')
    filelist = utils.load_filelist(args.dataset_dir, shuffle=True)
    valid_num = int(np.ceil(args.validation_rate * len(filelist)))
    valid_list, train_list = filelist[:valid_num], filelist[valid_num:]
    print('done')

    print('* setup model...', end=' ')
    if args.model_name is None:
        if args.method == 'noise':
            model_name = 'anime_style_noise{}'.format(args.noise_level)
        elif args.method == 'scale':
            model_name = 'anime_style_scale'
        elif args.method == 'noise_scale':
            model_name = 'anime_style_noise{}_scale'.format(args.noise_level)
        model_path = '{}_{}.npz'.format(model_name, args.color)
    else:
        # note: rstrip('.npz') would strip characters, not the suffix
        model_name = args.model_name[:-4] \
            if args.model_name.endswith('.npz') else args.model_name
        model_path = model_name + '.npz'
    if not os.path.exists('epoch'):
        os.makedirs('epoch')

    model = srcnn.archs[args.arch](ch)
    if model.offset % model.inner_scale != 0:
        raise ValueError('offset % inner_scale must be 0.')
    elif model.inner_scale != 1 and model.inner_scale % 2 != 0:
        raise ValueError('inner_scale must be 1 or an even number.')
    if args.finetune is not None:
        chainer.serializers.load_npz(args.finetune, model)

    if args.gpu >= 0:
        chainer.backends.cuda.check_cuda_available()
        chainer.backends.cuda.get_device(args.gpu).use()
        weight = chainer.backends.cuda.to_gpu(weight)
        model.to_gpu()

    optimizer = optimizers.Adam(alpha=args.learning_rate)
    optimizer.setup(model)
    print('done')

    valid_config = utils.get_config(args, model, train=False)
    train_config = utils.get_config(args, model, train=True)

    print('* check forward path...', end=' ')
    di = train_config.in_size
    do = train_config.out_size
    dx = model.xp.zeros((args.batch_size, ch, di, di), dtype=np.float32)
    dy = model(dx)
    if dy.shape[2:] != (do, do):
        raise ValueError('Invalid output size\n'
                         'Expect: ({}, {})\n'
                         'Actual: {}'.format(do, do, dy.shape[2:]))
    print('done')

    print('* starting processes of dataset sampler...', end=' ')
    valid_queue = DatasetSampler(valid_list, valid_config)
    train_queue = DatasetSampler(train_list, train_config)
    print('done')

    best_count = 0
    best_score = 0
    best_loss = np.inf
    for epoch in range(0, args.epoch):
        print('### epoch: {} ###'.format(epoch))
        train_queue.reload_switch(init=(epoch < args.epoch - 1))
        for inner_epoch in range(0, args.inner_epoch):
            best_count += 1
            print('  # inner epoch: {}'.format(inner_epoch))
            start = time.time()
            train_loss = train_inner_epoch(
                model, weight, optimizer, train_queue, args.batch_size)
            if args.reduce_memory_usage:
                train_queue.wait()
            if train_loss < best_loss:
                best_loss = train_loss
                print('    * best loss on training dataset: {:.6f}'.format(
                    train_loss))
            valid_score = valid_inner_epoch(
                model, valid_queue, args.batch_size)
            if valid_score > best_score:
                best_count = 0
                best_score = valid_score
                print('    * best score on validation dataset: PSNR {:.6f} dB'
                      .format(valid_score))
                best_model = model.copy().to_cpu()
                epoch_path = 'epoch/{}_epoch{}.npz'.format(model_name, epoch)
                chainer.serializers.save_npz(model_path, best_model)
                shutil.copy(model_path, epoch_path)
            if best_count >= args.lr_decay_interval:
                best_count = 0
                optimizer.alpha *= args.lr_decay
                if optimizer.alpha < args.lr_min:
                    optimizer.alpha = args.lr_min
                else:
                    print('    * learning rate decay: {:.6f}'.format(
                        optimizer.alpha))
            print('    * elapsed time: {:.6f} sec'.format(time.time() - start))
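The training script calls utils.set_random_seed(args.seed, args.gpu) early on. A minimal sketch of what such a helper typically does (an assumption; the project's own version may differ):

import random
import numpy as np

def set_random_seed(seed, gpu=-1):
    # Seed every RNG the training loop can touch so runs are reproducible.
    random.seed(seed)
    np.random.seed(seed)
    if gpu >= 0:
        import cupy  # only needed when training on GPU
        cupy.random.seed(seed)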
Example No. 7
    def __init__(self):
        self.config = get_config()
        self.run()
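This fragment shows a constructor that loads the config and immediately starts the object's main work. A self-contained sketch of that pattern; the class name and config contents are hypothetical:

def get_config():
    # Stand-in loader for this sketch only.
    return {"debug": False}

class App:
    def __init__(self):
        self.config = get_config()
        self.run()  # constructing the object immediately kicks off work

    def run(self):
        print("running with config:", self.config)

App()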
Example No. 8
parser.add_argument('--config',
                    type=str,
                    default='configs/edges2handbags_folder.yaml',
                    help='Path to the config file.')
parser.add_argument('--output_path',
                    type=str,
                    default='.',
                    help="outputs path")
parser.add_argument("--resume", action="store_true")
parser.add_argument('--trainer', type=str, default='MUNIT', help="MUNIT|UNIT")
opts = parser.parse_args()

cudnn.benchmark = True

# Load experiment setting
config = get_config(opts.config)
max_iter = config['max_iter']
display_size = config['display_size']
config['vgg_model_path'] = opts.output_path

# Setup model and data loader
if opts.trainer == 'MUNIT':
    trainer = MUNIT_Trainer(config)
elif opts.trainer == 'UNIT':
    trainer = UNIT_Trainer(config)
else:
    sys.exit("Only MUNIT and UNIT trainers are supported")
trainer.cuda()
train_loader_a, train_loader_b, test_loader_a, test_loader_b = get_all_data_loaders(
    config)
train_display_images_a = torch.stack(
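The snippet above (truncated mid-statement in the listing) reads its experiment settings with get_config(opts.config), which here presumably just parses the YAML file named by --config. A minimal sketch under that assumption:

import yaml

def get_config(config):
    # Hypothetical YAML loader returning a plain dict of settings
    # such as 'max_iter' and 'display_size'.
    with open(config, 'r') as stream:
        return yaml.safe_load(stream)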
Example No. 9
def GenerateWave(opts):
    config = get_config()
    create_db = opts.create_db

    N_SCHEDULE_GROUPS = config["N_SCHEDULE_GROUPS"]
    N_CONFIGURATIONS = config["N_CONFIGURATIONS"]

    NEXP = int(opts.nsubjects)  #experimental
    NCONT = int(opts.nsubjects)  #control
    WAVE = int(opts.wave)
    OFFSET = int(opts.offset) if opts.offset else 0

    prefix = opts.prefix
    schedule_file = prefix + "1schedule"
    schedule_table_file = "./scheduling/tables/%s%d_schedule_table_v3.csv" % (
        prefix, WAVE)
    # code : 1102 wave (1 digit), group (1 digit), subjectID (2 digits)
    subjects = [prefix + "%d1%0.2d" % (WAVE, i + 1) for i in range(NEXP)] + \
               [prefix + "%d2%0.2d" % (WAVE, i + 1) for i in range(NCONT)]
    group = [1 for i in range(NEXP)] + \
            [2 for i in range(NCONT)]

    # Generate schedule table
    schedule_group = 0
    row_list = []
    # add test subjects
    subjects = subjects + ["%stest%d" % (prefix, i) for i in [1, 2, 3, 4]]
    group = group + [1, 1, 2, 2]

    configuration = 0  # change to start with a different config
    for isub, subject in enumerate(subjects):
        row_list.append({
            'SUBJECT':
            subject,
            'SCHEDULE_FILE':
            schedule_file + "_g%d_c%d_s%d" %
            (group[isub], configuration + 1 + OFFSET, schedule_group),
            'SCHEDULE_GROUP':
            schedule_group,
            'FMRI_SCHEDULE_FILE':
            schedule_file + "_g%d_c%d_s%d_fmri" %
            (group[isub], configuration + 1 + OFFSET, schedule_group),
            'CONFIGURATION':
            configuration + 1 + OFFSET
        })
        schedule_group = (schedule_group + 1) % N_SCHEDULE_GROUPS
        configuration = (configuration + 1) % N_CONFIGURATIONS

    schedule_table = pd.DataFrame(row_list,
                                  columns=[
                                      'SUBJECT', 'SCHEDULE_FILE',
                                      'FMRI_SCHEDULE_FILE', 'CONFIGURATION',
                                      'SCHEDULE_GROUP'
                                  ])
    schedule_table.to_csv(schedule_table_file, sep=";", index=False)
    print("Subjects: ", subjects)
    print(schedule_table)

    # create db and subjects
    if create_db:
        try:
            db_config_json = open("./db/db_config.json", "r")
            db_config = json.load(db_config_json)
            db_config_json.close()

            with sshtunnel.SSHTunnelForwarder(
                (db_config["REMOTEHOST"], int(db_config["REMOTEPORT"])),
                    ssh_username=opts.ssh_username,
                    ssh_password=db_config["SSH_PASS"],
                    ssh_pkey=os.path.abspath(db_config["KEY"]),
                    remote_bind_address=(
                        db_config["LOCALHOST"],
                        int(db_config["LOCALPORT"]))) as server:
                port = server.local_bind_port
                try:
                    engine_string = "mysql://%s:%s@%s:%d/%s" % (
                        opts.sql_username, opts.sql_password,
                        db_config["LOCALHOST"], port, db_config["DATABASE"])

                    engine = create_engine(engine_string)

                    engine.execute("DROP DATABASE IF EXISTS %s" %
                                   (db_config["DATABASE"]))
                    engine.execute("CREATE DATABASE %s" %
                                   (db_config["DATABASE"]))
                    engine.execute("DROP DATABASE IF EXISTS %s" %
                                   (db_config["DATABASE_FMRI"]))
                    engine.execute("CREATE DATABASE %s" %
                                   (db_config["DATABASE_FMRI"]))
                    for subject in subjects:
                        command = "DROP USER IF EXISTS '%s'@'localhost'; CREATE USER '%s'@'localhost' IDENTIFIED BY '%s'; "%(subject, subject, db_config["SSH_PASS"]) + \
                        "GRANT INSERT,SELECT,CREATE,INDEX ON %s.* TO '%s'@'localhost';"%(db_config["DATABASE"], subject) + \
                        "GRANT INSERT,SELECT,CREATE,INDEX ON %s.* TO '%s'@'localhost';"%(db_config["DATABASE_FMRI"], subject)
                        print(command)


#                            engine.execute(command)
                    engine.dispose()
                    print("Synced with database.")
                except exc.SQLAlchemyError as e:
                    print("Error:", e)

        except Exception:
            print("Could not connect to database!")