    def __init__(self):
        configs = get_configs()
        if configs['corpus'] != 'rock':
            fname = 'embedding-skipgram-bach.pkl'
            symbol_fname = 'rn2letter-bach.pkl'
        else:
            # fname = 'embedding-skipgram-rock.pkl'
            fname = 'embedding-rock-rn-10-500.pkl'
            symbol_fname = 'rn2letter-rock.pkl'

        print '...SkipGramNNWrapper, directory', os.getcwd()
        fpath = os.path.join('data', fname)
        print 'fpath: ', fpath
        with open(fpath, 'rb') as p:
            embedding_dict = pickle.load(p)
        assert 'W1' in embedding_dict.keys() and \
            'syms' in embedding_dict.keys()
        for key, val in embedding_dict.iteritems():
            setattr(self, key, val)
        # normalize W1
        norm = np.linalg.norm(self.W1, axis=1)
        self.W1_norm = self.W1 / norm[:, None]

        self.rn2letter, self.letter2rn, self.syms = \
            update_syms(configs, symbol_fname, self.syms)

        print 'SkipGramNNWrapper, # of syms', len(self.syms)
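The row-normalised W1_norm computed above makes cosine-similarity lookups cheap. A small illustrative sketch, assuming (which this snippet does not guarantee) that self.syms is ordered to match the rows of W1:

import numpy as np

def most_similar(wrapper, sym, topn=5):
    # Rank symbols by cosine similarity in the row-normalised embedding space.
    idx = wrapper.syms.index(sym)
    sims = wrapper.W1_norm.dot(wrapper.W1_norm[idx])
    ranked = np.argsort(-sims)
    return [(wrapper.syms[i], float(sims[i])) for i in ranked if i != idx][:topn]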
def test_duplicate_by_transposing_and_into_letters():
    from config import get_configs
    from load_songs_tools import get_raw_data

    configs = get_configs()
    configs["augment_data"] = False
    configs["use_letternames"] = False
    seqs, syms = get_raw_data(configs)
    seqs, translation_dict = duplicate_by_transposing_and_into_letters(seqs)

    # want to store augmented_seqs
    fname_base = "%s-letters-augmented" % (configs["corpus"])

    print "...writing chord strings to file"
    fname = "%s.txt" % fname_base

    with open(fname, "w") as p:
        for seq in seqs:
            for ch in seq:
                p.write(ch + " ")
            p.write("\n")
    print ".finished writing chord strings to file"

    print "...pickling chord strings to file"
    fname = "%s.pkl" % fname_base
    with open(fname, "wb") as p:
        pickle.dump(seqs, p)
    print ".finished pickling chord strings to file"

    print "... pickling translation dict to file"
    fname = "%s-translation_dict.pkl" % fname_base
    with open(fname, "wb") as p:
        pickle.dump(translation_dict, p)
    print ".finished pickling translation dict to file"
Example #3
def generate_population(views=10, pop_size=10, verbose=0):
    '''
    Initialize the population.
    :param views: number of views
    :param pop_size: population size
    :return: the initial population, a list of individuals encoded as view_code + fusion_code
    '''
    fusion_ways = config.get_configs()['fusion_ways']
    population = []
    population_set = set()
    while len(population) < pop_size:
    # for i in range(pop_size):
        # view_code at least contains two elements
        view_code = random.sample(range(0, views), k=random.randint(2, views))
        fusion_code = random.choices(range(0, len(fusion_ways)), k=len(view_code)-1)
        pop = view_code+fusion_code
        if verbose == 1:
            print(f'view_code:{view_code}')
            print(f'fusion_code:{fusion_code}')
            print(f'pop:{pop}')
            print('='*30)
        if utils.list2str(pop) not in population_set:
            population.append(pop)
            population_set.add(utils.list2str(pop))
    return population
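A minimal usage sketch of the chromosome encoding above (hypothetical values; it assumes config.get_configs()['fusion_ways'] is a list of operation names such as ['add', 'mul', 'cat', 'max', 'avg'], as the comment in Example #25 below suggests):

import config

population = generate_population(views=5, pop_size=3)
fusion_ways = config.get_configs()['fusion_ways']
individual = population[0]
# A chromosome of length 2k - 1 lists k selected view indices followed by k - 1 fusion-op indices.
k = (len(individual) + 1) // 2
selected_views = individual[:k]
fusion_ops = [fusion_ways[i] for i in individual[k:]]
print(selected_views, fusion_ops)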
Example #4
def run_handler(event, context):  # pylint: disable=unused-argument
    initialise_logging()
    config_bucket_name = os.environ["CONFIG_BUCKET"]
    output_bucket_name = os.environ["OUTPUT_BUCKET"]
    log.info("## Getting configs")
    configs = get_configs(config_bucket_name)
    test_runner = TestRunner(output_bucket_name, *configs.values())
    output = test_runner.run_and_upload(
        accounts=[k.split(".")[0] for k in configs.keys()]  # 123.json -> 123
    )
    log.info("## Output:")
    log.info(output)
    log.info("## Getting whitelist")
    whitelist = get_whitelist(config_bucket_name)
    log.info(whitelist)
    if os.environ.get("DIFF_ONLY"):
        new, fixed = test_runner.diff_previous_s3(output, whitelist)
        send_diff_message(new, fixed, output_bucket_name,
                          key_from_output(output))
    else:
        log.info("## Sending to slack")
        send_full_message(
            set_of_issues(output, whitelist),
            output_bucket_name,
            key_from_output(output),
        )
    return {"statusCode": 200}
def make_NGram():
    from load_songs_tools import load_songs
    from config import get_configs

    configs = get_configs()
    seqs = load_songs(configs)
    ngram = NGram(seqs, 2)
    return ngram
def test_PreprocessedSeq():
    configs = get_configs()
    seqs, syms = get_raw_data(configs)
    window = configs["window"]
    seq = seqs[0]
    seq[1] = 'a'
    seq_p = PreprocessedSeq(seq, syms, configs["window"])
    data = PreprocessedData(seqs[:2], syms, window)
def get_data(configs=None):
    if configs is None:
        configs = get_configs()
    seqs, syms = get_raw_data(configs)
    print 'get data, # of syms', len(syms)
    window = configs["window"]
    data = PreprocessedData(seqs, syms, window)
    return data
Example #8
def run_chord2vec():
    """
    Run the model. To finish training early, set configs['max_iter'] to a smaller value.
    """
    configs = get_configs()
    configs['retrieve_model'] = False
    configs['max_iter'] = 1
    data = get_data(configs)
    nn, bigram = get_models(data, configs)
    print nn.plot_w1()
Example #9
def retrieve_skipgram_and_ngram():
    """
    By default, loads the cached model. To train a new model, set
    configs['retrieve_model'] to False, as in the run_chord2vec function.
    :return: neural-net skipgram, bigram model
    """
    configs = get_configs()
    data = get_data(configs)
    nn, bigram = get_models(data, configs)

    # saves a plot of the 2D PCA of the chord vectors
    nn.plot_w1()
Example #10
File: weights.py Project: nonva/JLM
def dump_trained_weights(experiment, verbose):
    config = get_configs(experiment)

    # Still need to instantiate the model to build the graph;
    # the graph itself is not saved.
    RNNLM_Model(config)

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

    with tf.Session() as session:
        session.run(init)
        saver.restore(
            session,
            os.path.join(experiment_path, str(experiment), "tf_dump",
                         'rnnlm.weights'))

        dump_vars = [
            'HMi', 'HMf', 'HMo', 'HMg', 'IMi', 'IMf', 'IMo', 'IMg',
            'LM', 'bi', 'bf', 'bo', 'bg', 'b2'
        ]

        if config.share_embedding:
            dump_vars += ['PM']
        else:
            dump_vars += ['UM']

        if config.V_table:
            dump_vars.remove('LM')
            for i, seg in enumerate(config.embedding_seg):
                if i != 0:
                    dump_vars += ['VT{}'.format(i)]
                dump_vars += ['LM{}'.format(i)]

        weight_dict = tf_weights_to_np_weights_dict(session, dump_vars)

        if config.D_softmax:
            # Instead of saving the full patched embedding, split "LM" into a list of per-segment block matrices
            blocks = []
            col_s = 0
            for size, s, e in config.embedding_seg:
                if e is None:
                    e = weight_dict['LM'].shape[0]
                blocks.append(weight_dict['LM'][s:e, col_s:col_s + size])
                col_s += size
            weight_dict['LM'] = blocks

        weight_dump_dir = os.path.join(experiment_path, str(experiment),
                                       "weights")
        dump_weights(weight_dict, weight_dump_dir, verbose)
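tf_weights_to_np_weights_dict is not shown in this excerpt; a minimal sketch that would satisfy the call above (an assumption, not the project's actual helper; tf is the already-imported TensorFlow 1.x module):

def tf_weights_to_np_weights_dict(session, var_names):
    # Evaluate each trainable variable whose unscoped name is in var_names
    # and return a {name: numpy array} dict.
    weights = {}
    for var in tf.trainable_variables():
        short_name = var.op.name.split('/')[-1]
        if short_name in var_names:
            weights[short_name] = session.run(var)
    return weights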
Example #11
def get_trans():
    configs = get_configs()
    data = get_data()
    nn, ngram = get_models()
    # if bigram nn, it's from t to t-1, then need to transpose?
    nn_trans_dict = nn.predict_identity()
    nn_trans = nn_trans_dict.values()[0]
    print '...nn_trans type', type(nn_trans), nn_trans.shape
    nn_trans = nn_trans_dict['1']
    print nn_trans_dict.keys()
    print '...nn_trans type', type(nn_trans), nn_trans.shape
    nn_trans = np.exp(nn_trans)
    ngram_trans = ngram.ngram
    return ngram_trans, nn_trans, ngram, nn, configs, data
Example #12
def main():
    """

    All configuration is done beforehand in config.py plus the .ini files.
    Treat config as read-only from here on.

    """

    global config
    for config in config.get_configs(sys.argv[1:]):
        if config.previous_rev < config.latest_rev:
            prepare_build_env()
            update_build()
        else:
            print "Revision already built for %s" % config.platform_id
Example #13
def main():
    """

    All configuration is done beforehand in config.py plus the .ini files.
    Treat config as read-only from here on.

    """

    global config
    for config in config.get_configs(sys.argv[1:]):
        if config.previous_rev < config.latest_rev:
            prepare_build_env()
            update_build()
        else:
            print 'Revision already built for %s' % config.platform_id
Example #14
    def __init__(self, params):
        self.uid = ''
        self.openid = ''
        self.userSessionExpire = 604800
        self.params = params
        self.config = config.get_configs()
        self.accessTokenExpire = 6000
        self.db = pymysql.connect(host=self.config['mysql']['host'],
                                  port=int(self.config['mysql']['port']),
                                  user=self.config['mysql']['user'],
                                  passwd=self.config['mysql']['password'],
                                  db=self.config['mysql']['db'],
                                  charset='utf8')
        self.cursor = self.db.cursor(cursor=pymysql.cursors.DictCursor)
        self.redis = MyRedis(self.config['redis'])
Example #15
    def initialize(self):
        # Called after socketio has initialized the namespace.
        self.history = []
        self.parsed_seqs_notes = {}
        self.unit_dur = 60/92.0

        self.ngram = retrieve_NGram()
        self.nn = retrieve_SkipGramNN()

        assert self.ngram.syms == self.nn.syms

        self.previous_sym = None
        self.previous_sym_ind = None
        self.n_suggestions = 5
        self.n_similar = 2

        self.suggestions = SuggestionList(self)
        self.suggestions_above = SuggestionList(self)

        self.config = get_configs()
        self.corpus = self.config['corpus']
        print '...corpus', self.corpus

        if self.config['use_letternames']:
            self.symbol_type = 'letter'
        else:
            self.symbol_type = 'roman'

        # need to correct some roman numerals
        print '# of syms: %d' % len(self.ngram.syms)
        self.syms = []
        for sym in self.ngram.syms:
            formatted_sym, valid = self.format_sym(sym)
            self.syms.append(formatted_sym)

        # print 'F#m in syms?', 'F#m' in self.syms

        # need to update the "spelling" of roman numerals in nn and ngram
        self.nn.syms = self.syms
        self.ngram.syms = self.syms

        self._rn2letter, self._letter2rn = self.load_rn2letter_dict()

        self.experiment_type = EXPERIMENT_TYPE

        self.logs = Logs(EXPERIMENT_TYPE, EXPERIMENT_TYPE_STRS)
def retrieve_NGram():
    configs = get_configs()
    if configs['corpus'] != 'rock':
        fname = 'bigram-bach.pkl'
        symbol_fname = 'rn2letter-bach.pkl'
    else:
        fname = 'bigram-rock.pkl'
        symbol_fname = 'rn2letter-rock.pkl'
    print '...retrieve_NGram fname', fname
    ngram = NGram(fname=fname)

    ngram.rn2letter, ngram.letter2rn, ngram.syms = \
        update_syms(configs, symbol_fname, ngram.syms)

    print 'retrieve_NGram, # of syms', len(ngram.syms)

    return ngram
Example #17
    def __init__(self):
        self.args = get_configs()
        set_random_seed(self.args.seed)
        print(self.args)
        self.performance_meters = self._set_performance_meters()
        self.reporter = self.args.reporter
        self.model = self._set_model()
        self.cross_entropy_loss = nn.CrossEntropyLoss().cuda()
        self.optimizer = self._set_optimizer()
        self.loaders = get_data_loader(
            data_roots=self.args.data_paths,
            metadata_root=self.args.metadata_root,
            batch_size=self.args.batch_size,
            workers=self.args.workers,
            resize_size=self.args.resize_size,
            crop_size=self.args.crop_size,
            proxy_training_set=self.args.proxy_training_set,
            num_val_sample_per_class=self.args.num_val_sample_per_class)
Example #18
def get_proxy_model():
    fname = None
    print fname
    assert fname is not None, "Error: no model to retrieve at the moment"
    with open(fname, "rb") as p:
        w1 = pickle.load(p)
        syms = pickle.load(p)
        model_loss = pickle.load(p)
        model_weights = pickle.load(p)
        # configs_reloaded = pickle.load(p)

    COMMON_TONE_CIRCLE = ["I", "iii", "V", "viio6", "ii", "IV", "vi", "I"]
    circle_name = COMMON_TONE_CIRCLE
    song_dict = make_song_dict(circle_name)

    configs_dummy = get_configs()
    fname, ax, vecs = plot_vec(w1, syms, configs_dummy, save=True, doPCA=True, return_ax=True)
    add_arrow_annotation(syms, vecs, song_dict, ax, False)
    plt.savefig("w1-%s.pdf" % Configs.get_timestamp())

    model = WrappeSkipGram(w1, syms)
    return model
Example #19
def get_segmented_songs(seqs=None, min_len=5):
    if seqs is None:
        from config import get_configs
        configs = get_configs()
        seqs = load_songs(configs)
    subseqs = []
    for seq in seqs:
        subseq = []
        for s in seq:
            subseq.append(s)
            if (s == 'I' or s == 'i') and len(subseq) > min_len:
                subseqs.append(subseq)
                subseq = []
    # with open('subseqs.txt', 'w') as p:
    #     lines = ''
    #     for seq in subseqs:
    #         line = ''
    #         for s in seq:
    #             line += '%s ' % s
    #         line += '\n'
    #         lines += line
    #     p.writelines(lines)
    return subseqs
Example #20
def auto_generate_sentence(experiment=1):
    gen_config = get_configs(experiment)
    gen_config.batch_size = gen_config.num_steps = 1

    gen_model = RNNLM_Model(gen_config)

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

    with tf.Session() as session:
        session.run(init)
        saver.restore(
            session,
            os.path.join(experiment_path, str(experiment), "tf_dump",
                         'rnnlm.weights'))
        starting_text = '<eos>'
        while starting_text:
            sen = generate_sentence(session,
                                    gen_model,
                                    gen_config,
                                    starting_text=starting_text,
                                    temp=1.0)
            print(' '.join([w.split('/')[0] for w in sen]))
            starting_text = input('> ')
Example #21
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import time
from tensorflow.python.platform import gfile
import numpy as np
import tensorflow as tf
import model
import tfrecord
import config

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
    'eval_dir', config.get_configs('global.conf', 'eval', 'eval_log_dir'),
    """Evaluation log dir.""")
tf.app.flags.DEFINE_string(
    'eval_data', config.get_configs('global.conf', 'eval',
                                    'eval_tfrecord_dir'),
    """Evaluation data log.""")
tf.app.flags.DEFINE_string(
    'checkpoint_dir', config.get_configs('global.conf', 'model', 'model_dir'),
    """Checkpoint dir.""")
tf.app.flags.DEFINE_integer('eval_interval_secs', 1,
                            """Evaluation interval time(sec)""")
tf.app.flags.DEFINE_integer(
    'num_examples',
    int(config.get_configs('global.conf', 'eval', 'eval_data_count')),
    """Total number of train data.""")
tf.app.flags.DEFINE_boolean('run_once', True, """Only run once.""")
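The three-argument form get_configs('global.conf', section, key) used here and in the later TensorFlow snippets suggests an INI-backed lookup; a minimal sketch of such a helper (an assumption about this project's config module, not its real code):

import configparser

def get_configs(conf_file, section, key):
    # Read a single value from an INI-style configuration file.
    parser = configparser.ConfigParser()
    parser.read(conf_file)
    return parser.get(section, key)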
Example #22
parser.add_argument('hostname')
parser.add_argument('port', type=int)
parser.add_argument('--oid-batch-size',
                    dest='oid_batch_size',
                    type=int,
                    default=10)
parser.add_argument('--sessions', dest='sessions', type=int, default=1)
parser.add_argument('--rounds', dest='rounds', type=int, default=1)
parser.add_argument('--json', dest='json', action='store_true')
parser.set_defaults(json=False)

args = parser.parse_args()

configs = get_configs(hostname=args.hostname,
                      port=args.port,
                      oid_batch_size=args.oid_batch_size,
                      sessions=args.sessions,
                      rounds=args.rounds,
                      print_results='false')
results = []
for lib, config in configs.items():
    setup_cmd = config.get('setup')
    exec_cmd = config['exec']
    if setup_cmd:
        setup_res, stderr, code = subprocess_output(setup_cmd)
        if code != 0:
            raise Exception("stderr: {}".format(stderr.strip()))

    exec_res, exec_stderr, code = subprocess_output(exec_cmd)
    if code != 0:
        raise Exception("stderr: {}".format(
            exec_stderr.decode('utf-8').strip()))
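subprocess_output is referenced but not defined in this snippet; a plausible stand-in matching how it is used above (a sketch, not the benchmark's real helper):

import subprocess

def subprocess_output(cmd):
    # Run a shell command and return (stdout, stderr, return code).
    proc = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return out, err, proc.returncode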
Example #23
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
import config

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer(
    'batch_size',
    int(config.get_configs('global.conf', 'dataset', 'batch_size')),
    """Batch size.""")

IMAGE_HEIGHT = int(
    config.get_configs('global.conf', 'dataset', 'resize_image_height'))
IMAGE_WIDTH = int(
    config.get_configs('global.conf', 'dataset', 'resize_image_width'))
NUM_CLASSES = int(config.get_configs('global.conf', 'dataset', 'num_class'))
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = int(
    config.get_configs('global.conf', 'train', 'train_data_count'))
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = int(
    config.get_configs('global.conf', 'eval', 'eval_data_count'))

# Hyper parameters
MOVING_AVERAGE_DECAY = float(
    config.get_configs('global.conf', 'model', 'moving_average_decay'))
NUM_EPOCHS_PER_DECAY = float(
    config.get_configs('global.conf', 'model', 'num_epochs_per_decay'))
LEARNING_RATE_DECAY_FACTOR = float(
    config.get_configs('global.conf', 'model', 'learning_rate_decay_factor'))
INITIAL_LEARNING_RATE = float(
Example #24
def get_models(data=None, configs=None, save=False):
    if configs is None:
        configs = get_configs()

    if data is None:
        data = get_data(configs)

    # TODO: remove hack
    if configs["bigram"]:
        reduced_keys = [configs["reduced_key"]]
        data.keys = reduced_keys
        test_data = data.get_test_data()
        test_data.keys = reduced_keys

    retrieve_model = configs["retrieve_model"]
    model = SkipGramNN(data, configs)
    print "SkipGramNN, # of syms", len(model.syms)

    if not retrieve_model:
        model_loss = model.train()
        if save:
            model.save("skipgram-%s.pkl" % (configs["corpus"]))
            plt.clf()
            plt.plot(model.loss_curve)
            plt.savefig("losses-%s.png" % configs.name)
        print "=== train loss ==="
        print "loss: %.2f" % model_loss
        loss = model.check_loss()
        if not configs["regularize"]:
            assert np.allclose(loss, model_loss)

        if save:
            model_weights = model.weights.value
            fname = "w1-%s.pkl" % configs.name
            print fname
            with open(fname, "wb") as p:
                pickle.dump(model.W1.value, p)
                pickle.dump(data.syms, p)
                pickle.dump(model_loss, p)
                pickle.dump(model_weights, p)
                pickle.dump(configs, p)

            fname = "skipgram-bach.pkl"
            model.save(fname)
    else:
        fname = os.path.join("data", "test_skipgram_model.pkl")
        print fname
        assert fname is not None, "Error: no model to retrieve at the moment"
        with open(fname, "rb") as p:
            w1 = pickle.load(p)
            syms = pickle.load(p)
            model_loss = pickle.load(p)
            model_weights = pickle.load(p)
            configs_reloaded = pickle.load(p)
        for key in configs.keys():
            if key not in configs_reloaded.keys():
                print "no key", key
        for key in configs.keys():
            if key in configs_reloaded.keys():
                if configs[key] != configs_reloaded[key]:
                    print configs[key], configs_reloaded[key]

        # assert configs == configs_reloaded
        model.init_weights(model_weights, model_loss)

    train_seq_data = data.get_train_seqs_data()
    train_seqs = [seq for seq in train_seq_data.seqs]
    syms = data.syms

    # ngram_model = NGram(train_seqs, syms, 2, configs)
    ngram_model = NGram(data.seqs, syms, 2, configs)
    print "\n\nNgram, # of syms", len(ngram_model.syms)
    if save:
        ngram_model.save("bigram-%s.pkl" % (configs["corpus"]))
    print len(ngram_model.syms), len(model.data.syms)
    assert ngram_model.syms == model.data.syms

    return model, ngram_model
Example #25
tf.get_logger().setLevel('ERROR')
import numpy as np
from os.path import join as pjoin
import random
from sklearn import metrics
from multiprocessing import Pool
import time
import argparse
from data_utils.data_uitl import get_views
import config
from code2net import code2net
import population_init
import gen_offspring
import utils

paras = config.get_configs()
fusion_ways = paras['fusion_ways']
fused_nb_feats = paras['fused_nb_feats']
classes = paras['classes']
batch_size = paras['batch_size']
epochs = paras['epochs']
classes = paras['classes']
pop_size = paras['pop_size']
nb_iters = paras['nb_iters']
data_name = paras['data_name']

# ['add', 'mul', 'cat', 'max', 'avg']
# Only load all view once
data_base_dir = os.path.join('..', data_name)
view_data_dir = os.path.join(data_base_dir, 'view')
view_train_x, train_y, view_test_x, test_y = get_views(
from win10toast import ToastNotifier
from config import get_configs


def notify(notifier, title, msg, duration):
    # icon_path needs to be defined (even if invalid) so the executable will work fine
    notifier.show_toast(title=title, msg=msg, duration=duration, icon_path="")


def min2sec(mins):
    return mins * 60


if __name__ == "__main__":
    toaster = ToastNotifier()
    while True:
        configs = get_configs()
        notify(toaster, configs[1], configs[2], min2sec(configs[0]))
Example #27
import tensorflow as tf
import os
import sys
import time
import random
from PIL import Image
import config

IMAGE_HEIGHT = int(
    config.get_configs('global.conf', 'dataset', 'resize_image_height'))
IMAGE_WIDTH = int(
    config.get_configs('global.conf', 'dataset', 'resize_image_width'))
CHANNELS = int(config.get_configs('global.conf', 'dataset', 'channels'))
ORIGIN_DATASET = config.get_configs('global.conf', 'dataset',
                                    'origin_data_dir')
TRAIN_DATASET = config.get_configs('global.conf', 'dataset', 'train_data_dir')
EVAL_DATASET = config.get_configs('global.conf', 'dataset', 'eval_data_dir')
BATCH_SIZE = int(config.get_configs('global.conf', 'dataset', 'batch_size'))


def create(dataset_dir,
           tfrecord_path,
           tfrecord_name='train_tfrecord',
           width=IMAGE_WIDTH,
           height=IMAGE_HEIGHT):
    """Creat tfrecord dataset

    Arguments:
        dataset_dir: String, original data dir
        tfrecord_name: String, output tfrecord name
        tfrecord_path: String, output tfrecord path
Example #28
#encoding=utf-8
#author: liang xinyan
#email: [email protected]
import numpy as np
import shutil
import os
from PIL import Image
from tqdm import tqdm
from multiprocessing import Pool
opt = os.path
from config import get_configs
paras = get_configs()
image_size = paras['image_size']
data_name = paras['data_name']
w, h, c = image_size['w'], image_size['h'], image_size['c']

import sklearn


def read_image(fn):
    img = Image.open(fn)
    img = img.convert('L')
    # img = img.convert('RGB')
    img = img.resize((w, h))
    img = np.array(img)
    return np.array(img)

with open(opt.join(data_name, 'wnids.txt')) as f:
    labels = f.readlines()
    labels = [str.strip(i) for i in labels]
Example #29
def get_configs_data():
    configs = get_configs()
    data = get_data(configs)
    return configs, data
Example #30
File: app.py Project: ujfj1986/blog
		for name, f in filters.items():
			env.filters[name] = f
	add_template(env)

def datetime_filter(t):
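	# Renders a relative timestamp; the Chinese literals mean '1 minute ago',
	# 'N minutes ago', 'N hours ago', 'N days ago', and 'YYYY年MM月DD日' (year/month/day).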
	delta = int(time.time() - t)
	if delta < 60:
		return u'1分钟前'
	if delta < 3600:
		return u'%s分钟前' % (delta // 60)
	if delta < 86400:
		return u'%s小时前' % (delta // 3600)
	if delta < 604800:
		return u'%s天前' % (delta // 86400)
	dt = datetime.fromtimestamp(t)
	return u'%s年%s月%s日' % (dt.year, dt.month, dt.day)

def init_database():
	db.create_engine(**db_config)

from config import get_configs
configs = get_configs()
server_config = configs['server']
db_config = configs['db']
session_config = configs['session']
init(server_config['host'], server_config['port'])
init_database()
init_jinja2(filters=dict(datetime=datetime_filter))
add_routes('handlers')
start()
Example #31
from __future__ import print_function
from datetime import datetime
import os.path
import math
import time
import numpy as np
from six.moves import xrange
import tensorflow as tf
import model
import tfrecord
import config
import eval_model

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
    'train_dir', config.get_configs('global.conf', 'model', 'model_dir'),
    """Train model checkpoint dir.""")

tf.app.flags.DEFINE_string(
    'eval_data_dir',
    config.get_configs('global.conf', 'eval', 'eval_tfrecord_dir'),
    """Evaluation data log.""")

tf.app.flags.DEFINE_integer(
    'max_steps', int(config.get_configs('global.conf', 'train', 'max_steps')),
    """Max training steps.""")

tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            """Log device placement.""")

tf.app.flags.DEFINE_string(
Example #32
                        help="Type: Images (SVR) or Points (AE)")
    parser.add_argument("--batch_size",
                        default=10,
                        type=int,
                        help="batch size used for evaluation")
    parser.add_argument("--num_workers",
                        default=13,
                        type=int,
                        help="number of threads to launch")

    opt = parser.parse_args()

    os.makedirs('./results/', exist_ok=True)
    os.makedirs('./cache/', exist_ok=True)

    PRED_PATH, GDTH_PATH, OUTFILENAME, ROT, TYPE = get_configs(
        opt.method, opt.type)

    # Create the outfile
    RESULTS = {}
    for cat in os.listdir(GDTH_PATH):
        RESULTS[cat] = METRICS.copy()

    with open(OUTFILENAME, 'w') as file:
        json.dump(RESULTS, file)

    # Load all the meshes to be evaluated
    for cat in os.listdir(GDTH_PATH):  # For each category
        print("Doing for : ", CLASS_NAMES[cat])
        all_models = []
        if TYPE == 'Images2':  # The directory structure is different for Pixel2Mesh and MeshRCNN so accommodate that
            all_models = os.listdir(PRED_PATH + cat)
Example #33
from PIL import Image
import tensorflow as tf
import model
import config


FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('checkpoint_dir', 
                           config.get_configs('global.conf', 'model', 'model_dir'),
                           """Checkpoint dir.""")
tf.app.flags.DEFINE_string('input_img', 
                           './eval/bar/211-2-2.jpg',
                           """input image to be predicted.""")
tf.app.flags.DEFINE_integer('image_height', 
                            int(config.get_configs('global.conf', 'dataset', 'resize_image_height')),
                            """Resized image height.""")

tf.app.flags.DEFINE_integer('image_width', 
                            int(config.get_configs('global.conf', 'dataset', 'resize_image_width')),
                            """Resized image width.""")


def inputs(input, count=1, batch_size=1):
    """Get input image.

    """


    model.FLAGS.batch_size = batch_size
    img = Image.open(input)
    img = img.resize((FLAGS.image_width, FLAGS.image_height))
Example #34
    def get_by_main_key(key):
        if key in config.get_configs().keys():
            return config.get_configs()[key]
        return False
Example #35
    def get_partner_settings(partner_name):
        partner_name = partner_name.upper()
        if partner_name in config.get_configs()['partners']:
            return config.get_configs()['partners'][partner_name]
        pass
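Both helpers above read the config twice per call; assuming get_configs() returns a plain dict, a single dict.get lookup gives an equivalent, tighter sketch (the second variant still returns None for unknown partners, like the bare pass above, but no longer raises if the 'partners' key is missing):

    def get_by_main_key(key):
        # One config read; missing keys map to False, as before.
        return config.get_configs().get(key, False)

    def get_partner_settings(partner_name):
        # One config read; unknown partners yield None.
        return config.get_configs().get('partners', {}).get(partner_name.upper())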