Example #1
    def __init__(self) -> None:
        super().__init__()

        pg.mixer.pre_init(44100, -16, 4, 2048)

        pg.init()

        self._dim_screen = pg.Surface(self.screen.get_size()).convert_alpha()
        self._dim_screen.fill((0, 0, 0, 180))

        # needs to happen before we make any controllers
        controllers.base.initialize_controller(self._quit)

        # needs to happen after the video mode has been set
        images.initialize_images()

        # needs to happen after a valid mixer is available
        sounds.initialize_sounds()

        self._clock = pg.time.Clock()

        timer = model.Timer(self._clock)
        groups = model.Groups()
        model.initialize(groups, timer)

        self._paused = False
Example #2
def _initialization_tests() -> None:
    # Normally I would be running unit tests, but it is not possible to check
    #  exceptions once the classes are initialized.
    _assert_runtime_exception_raised(make_player)
    model.initialize(InitsTest.groups, InitsTest.timer)

    InitsTest.groups.empty()
    InitsTest.timer.reset()
Example #3
def initialize_everything(groups: model.Groups = None,
                          timer: model.Timer = None) -> None:
    if groups is None:
        groups = model.Groups()
    if timer is None:
        timer = MockTimer()

    initialize_pygame()
    model.initialize(groups, timer)
    initialize_controller(None)
    ScreenAccess.initialize()
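A usage sketch for this helper: called with no arguments from a unittest module-level setup, it builds a fresh Groups and a MockTimer (this assumes initialize_everything is importable where the tests live):

# Sketch: module-level test setup reusing the helper above with its defaults.
def setUpModule() -> None:
    initialize_everything()  # builds a fresh Groups and a MockTimer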
Example #4
def client():
    app.config["TESTING"] = True
    os.environ["test"] = "True"
    model.initialize(app)

    model.init_db()
    client = app.test_client()

    yield client

    model.clear_db()
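A minimal sketch of a test consuming this fixture, assuming the function above is decorated with @pytest.fixture (the decorator is not shown in the excerpt) and that the app serves a "/" route (hypothetical):

# Hypothetical test; pytest injects the `client` fixture by parameter name.
def test_index_responds(client):
    response = client.get("/")  # "/" is an assumed route
    assert response.status_code == 200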
Example #5
    def build_network(self,
                      num_labels,
                      features,
                      max_len=None,
                      hidden_units=None,
                      l2=None,
                      use_cnn=None,
                      cnn_filter_size=None,
                      cnn_pool_size=None,
                      cnn_num_filters=None,
                      cnn_filter_sizes=None,
                      embedding_size=None,
                      DEBUG=False):
        """ Build the neural network used for training.

        :param num_labels:      Number of labels to classify
        :param features:        the input features we use
        :param max_len:     Configured window-size
        :param hidden_units:    Number of units in the MLP's hiddden layer
        :returns:               The cost function, the misclassification rate
                                function, the computation graph of the cost
                                function and the prediction function
        """
        logger.info(
            'building the network, with one CNN for left and one for right')
        hidden_units = hidden_units or self._config['hidden_units']
        logger.info('#hidden units: %d', hidden_units)
        # building the feature vector from input.
        mlp_in_e1, mlp_in_e2, mlp_in_dim = self.build_feature_vector_noMention(
            features)
        logger.info('feature vector size: %d', mlp_in_dim)

        mlp = MLP(activations=[Rectifier()],
                  dims=[mlp_in_dim, hidden_units],
                  seed=self.curSeed)
        initialize([mlp])
        before_out_e1 = mlp.apply(mlp_in_e1)
        before_out_e2 = mlp.apply(mlp_in_e2)
        hidden_to_output = Linear(name='hidden_to_output',
                                  input_dim=hidden_units,
                                  output_dim=num_labels)
        initialize([hidden_to_output])
        linear_output_e1 = hidden_to_output.apply(before_out_e1)
        linear_output_e2 = hidden_to_output.apply(before_out_e2)
        linear_output_e1.name = 'linear_output_e1'
        linear_output_e2.name = 'linear_output_e2'

        y_hat_e1 = Logistic(name='logistic1').apply(linear_output_e1)
        y_hat_e2 = Logistic(name='logistic2').apply(linear_output_e2)
        y_hat_e1.name = 'y_hat_e1'
        y_hat_e2.name = 'y_hat_e2'
        y_hat_e1 = debug_print(y_hat_e1, 'y_1', DEBUG)
        return y_hat_e1, y_hat_e2, before_out_e1, before_out_e2
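Note that both entity paths share the same MLP and hidden_to_output parameters; only the inputs mlp_in_e1 and mlp_in_e2 differ, so the two sigmoid outputs come from one shared network.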
Example #6
    def apply_cnn(self, l_emb1, l_size1, l_emb2, l_size2, r_emb1, r_size1,
                  r_emb2, r_size2, embedding_size, mycnf):
        assert l_size1 == r_size1
        assert l_size2 == r_size2
        assert l_size1 == l_size2
        max_len = l_size1
        fv_len = 0
        filter_sizes = mycnf['cnn_config']['filter_sizes']
        num_filters = mycnf['cnn_config']['num_filters']
        for i, fw in enumerate(filter_sizes):
            conv_left = ConvolutionalActivation(
                activation=Rectifier().apply,
                filter_size=(fw, embedding_size),
                num_filters=num_filters,
                num_channels=1,
                image_size=(max_len, embedding_size),
                name="conv" + str(fw) + l_emb1.name,
                seed=self.curSeed)
            conv_right = ConvolutionalActivation(
                activation=Rectifier().apply,
                filter_size=(fw, embedding_size),
                num_filters=num_filters,
                num_channels=1,
                image_size=(max_len, embedding_size),
                name="conv" + str(fw) + r_emb1.name,
                seed=self.curSeed)
            pooling = MaxPooling((max_len - fw + 1, 1), name="pool" + str(fw))
            initialize([conv_left, conv_right])
            l_convinp1 = l_emb1.flatten().reshape(
                (l_emb1.shape[0], 1, max_len, embedding_size))
            l_convinp2 = l_emb2.flatten().reshape(
                (l_emb2.shape[0], 1, max_len, embedding_size))
            l_pool1 = pooling.apply(conv_left.apply(l_convinp1)).flatten(2)
            l_pool2 = pooling.apply(conv_left.apply(l_convinp2)).flatten(2)
            r_convinp1 = r_emb1.flatten().reshape(
                (r_emb1.shape[0], 1, max_len, embedding_size))
            r_convinp2 = r_emb2.flatten().reshape(
                (r_emb2.shape[0], 1, max_len, embedding_size))
            r_pool1 = pooling.apply(conv_right.apply(r_convinp1)).flatten(2)
            r_pool2 = pooling.apply(conv_right.apply(r_convinp2)).flatten(2)
            onepools1 = T.concatenate([l_pool1, r_pool1], axis=1)
            onepools2 = T.concatenate([l_pool2, r_pool2], axis=1)
            fv_len += conv_left.num_filters * 2
            if i == 0:
                outpools1 = onepools1
                outpools2 = onepools2
            else:
                outpools1 = T.concatenate([outpools1, onepools1], axis=1)
                outpools2 = T.concatenate([outpools2, onepools2], axis=1)
        return outpools1, outpools2, fv_len
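After the loop, fv_len equals 2 * num_filters * len(filter_sizes): each filter width contributes num_filters pooled features for the left branch and the same number for the right.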
Example #7
def run():

    global run_thread

    time.sleep(1)
    ini_file_path = './parameters.ini'
    population, global_parameters = model.initialize(ini_file_path)

    x = []
    y = [[], [], [], [], [], []]

    for n in range(global_parameters['number_of_generations']):

        while run_thread.pause:
            time.sleep(0.5)

        population.output_stats(global_parameters['output_file'])
        x.append(population.generations)
        for i in range(len(genotypes)):
            y[i].append(population.genotypes_count[genotypes[i]] /
                        sum(population.genotypes_count.values()))
        if n % 10 == 0:
            doc.add_next_tick_callback(partial(update, x=x, y=y))
            x = []
            y = [[], [], [], [], [], []]
        population.update()

        if run_thread.stop:
            return

    print('Done')
Example #8
def start():

    # is_linux = bool(platform.system() == r'Linux')

    # load router
    HandlerFactory().load_settings(router.routing_table)

    # config logger
    lc = LoggerConfig()
    if Config.LogLevel:
        lc.set_level(Config.LogLevel)
    if Config.LogFilePath:
        if not os.path.exists(Config.LogFilePath):
            os.makedirs(Config.LogFilePath)
        log_file_prefix = r'{}/all.log'.format(Config.LogFilePath)
        lc.add_handler(Config.LogLevel,
                       log_file_prefix,
                       when=r'midnight',
                       backupCount=Config.LogFileBackups)

    loop = asyncio.get_event_loop()

    # do cache, eventbus initialize
    loop.run_until_complete(initialize())

    # init
    coro = loop.create_server(ServerProtocol, Config.ServerHost,
                              Config.ServerPort)
    server = loop.run_until_complete(coro)
    app_log.info(r'Serving on {}'.format(server.sockets[0].getsockname()))
    Service().run()
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass  # fall through to the shutdown block below
    except Exception as e:
        app_log.exception('{}'.format(e))

    # shutdown
    server.close()
    loop.run_until_complete(server.wait_closed())
    loop.close()
    app_log.info(r'Server closed')
Example #9
batch_size_ph = dataset_init[2]
input_tensor = dataset_init[3]
target_tensor = dataset_init[4]
iterator_initializer_train = dataset_init[5]
iterator_initializer_test = dataset_init[6]
iterator_feed_dict_train = dataset_init[7]
iterator_feed_dict_test = dataset_init[8]

model_init = model.initialize(
    input_tensor,
    target_tensor,
    network_name,
    network_args,
    loss_name,
    stats_train_list,
    stats_test_list,
    optimizer_name,
    momentum,
    nesterov,
    weight_decay,
    lr_init,
    lr_decay,
    lr_schedule)

epoch_tensor = model_init[0]
train_begin = model_init[1]
train_step = model_init[2]
train_summary = model_init[3]
train_end = model_init[4]
test_begin = model_init[5]
test_step = model_init[6]
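The positional indexing above is easy to get out of sync with the tuple it unpacks; an equivalent sketch using sequence unpacking (names taken from the snippet, with underscores for the unused leading slots and a star to absorb anything past index 8):

# Equivalent unpacking of dataset_init; slots 0-1 are unused here.
(_, _, batch_size_ph, input_tensor, target_tensor,
 iterator_initializer_train, iterator_initializer_test,
 iterator_feed_dict_train, iterator_feed_dict_test, *_) = dataset_init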
Example #10
def setUpModule() -> None:
    initialize_pygame()
    model.initialize(ProjectilesTest.groups, ProjectilesTest.timer)
Example #11
                )

                flash("The email address you entered ({}) \
                \nis a VALID email address! Thank you!".format(
                    request.form['email']))

                return redirect("/success")
            else:
                flash("Email is not valid!")
        else:
            flash("Email already exists!")

    return render_template('index.html')


@app.route("/success")
def success():
    emails = Email.select()
    return render_template("success.html", emails=emails)


@app.route("/remove/<email>")
def remove(email):
    Email.delete().where(Email.email == email).execute()
    return redirect("/success")


if __name__ == "__main__":
    initialize()
    app.run(debug=DEBUG, host=HOST, port=PORT)
Example #12
def setUpModule() -> None:
    initialize_pygame()
    model.initialize(ModTest.groups, ModTest.timer)
    ModTest.ability_data = AbilityData(10)
Example #13
def setUpModule() -> None:
    initialize_pygame()
    model.initialize(HumanoidsTest.groups, HumanoidsTest.timer)
Example #14
import requests
import model as m

dataset = m.initialize()

unique_dates = list()

raw_data = requests.get(
    'https://api.covid19india.org/v2/state_district_wise.json')
raw_json = raw_data.json()

for state_entry in raw_json:

    for state in dataset:

        if state_entry['state'] == state['name']:

            state['districts'] = dict()

            for district in state_entry['districtData']:
                district_name = district['district']
                state['districts'][district_name] = {
                    'confirmed': district['confirmed']
                }


def total_count(state_code):
Example #15
def setUpModule() -> None:
    initialize_pygame()
    model.initialize(ModTest.groups, ModTest.timer)
Example #16
def main(database_path, source_tsv):
    initialize(database_path)
    load_features(database_path, source_tsv)
Example #17
def initialize():
    analyzer = model.initialize()
    return analyzer
Example #18
def initzo():
    model.initialize()
Example #19
    print('device: ', params.device)

    # set random seed
    torch.manual_seed(11052018)
    if params.device.type == 'cuda':
        torch.cuda.manual_seed(11052018)

    # input
    train_dataloader = fetch_dataloader(params, train=True)
    val_dataloader = fetch_dataloader(params, train=False)

    # construct model
    # dims out (pytorch affine grid requires 2x3 matrix output; else perspective transform requires 8)
    model = model.STN(getattr(model, params.stn_module),
                      params).to(params.device)
    # initialize
    initialize(model)
    capacity = sum(p.numel() for p in model.parameters())

    loss_fn = torch.nn.CrossEntropyLoss().to(params.device)
    optimizer = torch.optim.Adam([{
        'params': model.transformer.parameters(),
        'lr': params.transformer_lr
    }, {
        'params': model.clf.parameters(),
        'lr': params.clf_lr
    }])
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, params.lr_step,
                                                params.lr_gamma)

    # train and eval
    print('\nStarting training with model (capacity {}):\n'.format(capacity),
Example #20
from ops import normalize, sparsify, shuffle
from inputs import argparser
from evaluation import *

import time
import os

import numpy as np

np.seterr("raise")  # To stop execution on overflow warnings

if __name__ == '__main__':
    m_opts = argparser()
    print('Model Options:')
    print(m_opts)

    print("Initializing model...")
    m_vars = initialize(m_opts)
    print("Model initialized, beginning training.")

    # Setting up batch sized ranges of data. The last batch with size less
    # than 'batch_size' is ignored, but only for the current epoch.
    iter_idx = -1
    start_idx = range(0, m_vars['n_users'], m_opts['batch_size'])
    end_idx = start_idx[1:]

    minibatch_count = m_vars['n_users'] // m_opts['batch_size']
    if minibatch_count == len(start_idx):
        end_idx.append(m_vars['n_users'])

    # Learning rate 'lr' follows a power-law decay in the minibatch index
    lr = m_opts['lr_alpha'] * (1.0 + np.arange(
        minibatch_count * m_opts['num_epochs'])) ** (-m_opts['lr_tau'])
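For reference, this schedule gives minibatch t (zero-based, counted across all epochs) the rate lr_alpha * (1 + t) ** (-lr_tau); a one-line sketch of the per-step value:

# Per-step learning rate implied by the vectorized schedule above.
def decayed_lr(t, lr_alpha, lr_tau):
    return lr_alpha * (1.0 + t) ** (-lr_tau)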
Example #21
def setUpModule() -> None:
    initialize_pygame()
    model.initialize(AbilitiesTest.groups, AbilitiesTest.timer)
    ability_data = AbilityData(**load_ability_data_kwargs('pistol'))

    AbilitiesTest.projectile_ability_data = ability_data
Example #22
def mirror(database_path):
    initialize(database_path)
    for i, features_file in enumerate(get_feature_files()):
        print "On item %d" % i
        loader.load_features(database_path, features_file)
        features_file.close()
Example #23
def setUpModule() -> None:
    initialize_pygame()
    ConstructorTest.groups = model.Groups()
    model.initialize(ConstructorTest.groups, MockTimer())
Example #24
                          drop_e=0)
    elif args.model == 'StandardRNN':
        model = gluonnlp.model.StandardRNN(args.rnn_cell,
                                           vocab_size,
                                           args.emb_size,
                                           args.hid_size,
                                           args.num_layers,
                                           dropout=args.dropout,
                                           tie_weights=args.tied)

    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    ar_loss = gluonnlp.loss.ActivationRegularizationLoss(args.alpha)
    tar_loss = gluonnlp.loss.TemporalActivationRegularizationLoss(args.beta)
    joint_loss = JointActivationRegularizationLoss(loss, ar_loss, tar_loss)

    model.initialize(init.Xavier(), ctx=ctxs)
    model.hybridize()
    if args.optimizer == 'SGD':
        trainer_params = {
            'learning_rate': args.lr,
            'momentum': 0,
            'wd': args.wdecay
        }
    elif args.optimizer == 'Adam':
        trainer_params = {
            'learning_rate': args.lr,
            'wd': args.wdecay,
            'beta1': 0,
            'beta2': 0.999,
            'epsilon': 1e-9
        }
Example #25
        minLat = body['minLat']
        minLng = body['minLng']
        maxLat = body['maxLat']
        maxLng = body['maxLng']
        points = m.getAllPointsReport(minLng, minLat, maxLng, maxLat)
        return jsonify({"All Points": points})
    except Exception as e:
        return jsonify({"Error": str(e)})


@app.route('/potholes', methods=['POST'])
def getPotholes():
    res = []
    try:
        body = request.get_json()
        res = m.getPotholes(body['latitude'], body['longitude'],
                            body['radius'], body['day'])
    except Exception as e:
        return jsonify(f"error:{e}")
    return jsonify({"potholes": res})


@app.route('/test')
def connectionTest():
    return "<h1> App is up and running</h1>"


if __name__ == "__main__":
    m.initialize()
    app.run(host="0.0.0.0", debug=True)
Example #26
google.__path__.append(os.path.join(vendor_dir, 'google'))

sys.path.insert(0, vendor_dir)

import web
import time
import model
import json
from web import form

urls = ('/', 'index', '/trains/?', 'index', '/trains/(.+)', 'traintime')

render = web.template.render('templates')
app_config = json.load(open('gtfs.config'))
api_key = json.load(open('mta_api_key'))['api_key']
model.initialize(api_key, app_config['gtfs_directory'],
                 app_config['cache_max_age'])


class index:
    def GET(self):
        return render.index(sorted(model.get_stops(),
                                   key=lambda stop: stop[1]))

    def POST(self):
        user_input = web.input(stations=[])
        raise web.seeother('/trains/' + ','.join(user_input.stations))


class traintime:
    def GET(self, train_stops):
        return render.arrivals(
Example #27
                                  sampler=contrib.data.IntervalSampler(len(test_dataset),
                                                                       nbatch_test),
                                  last_batch='discard')


###############################################################################
# Build the model
###############################################################################


ntokens = len(vocab)
model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid,
                       args.nlayers, args.dropout, args.tied)
if args.hybridize:
    model.hybridize(**hybridize_optional)
model.initialize(mx.init.Xavier(), ctx=context)

compression_params = None if args.gctype == 'none' else {
    'type': args.gctype, 'threshold': args.gcthreshold}
trainer = gluon.Trainer(model.collect_params(), 'sgd',
                        {'learning_rate': args.lr,
                         'momentum': 0,
                         'wd': 0},
                        compression_params=compression_params)
loss = gluon.loss.SoftmaxCrossEntropyLoss()
if args.hybridize:
    loss.hybridize(**hybridize_optional)

###############################################################################
# Training code
###############################################################################
Example #28
def setUpModule() -> None:
    initialize_pygame()
    model.initialize(LaserTest.groups, LaserTest.timer)

    ability_data = AbilityData(**load_ability_data_kwargs('laser'))
    LaserTest.laser_ability = GenericAbility(ability_data)
Example #29
from flask import Flask

import model
from resources.message import messages_api
from resources.users import users_api

app = Flask(__name__)

app.register_blueprint(messages_api, url_prefix='/api/v1')
app.register_blueprint(users_api, url_prefix='/api/v1')
if __name__ == '__main__':
    model.initialize()
    app.run(debug=True, port=8080)
Example #30
    def deleteEntry(self):
        """
		Deletes an entry
		"""
        Entry.deleteEntry(self.currDate)
        self.entry.delete(1.0, tkinter.END)
        self.updateStatusBar("Last action: " + str(self.currDate) +
                             " - Entry deleted")

    def saveEntry(self):
        """
		Updates an entry
		"""
        contents = self.entry.get(1.0, tkinter.END)
        Entry.updateEntry(self.currDate, contents)
        self.updateStatusBar("Last action: " + str(self.currDate) +
                             " - Entry updated")

    def getEntry(self, date):
        """
		Gets the contents of an entry of a specified date
		"""
        return Entry.getEntry(date)


if __name__ == '__main__':
    initialize()
    window = tkinter.Tk()
    diary = Diary(window)
    window.mainloop()
Example #31
def run():
    if len(sys.argv) < 8:
        print(
            "** Usage: python " + sys.argv[0] +
            " <<Benchmark: tt/geo>> <<Model: bagofwords/seq2seq>> <<Input Vocab>> <<Word Embeddings>> <<Model Directory>> <<Train Set>> <<PCA Set>>"
        )
        sys.exit(1)

    np.random.seed(42)
    benchmark = sys.argv[1]
    config, words, reverse, model = initialize(benchmark=benchmark,
                                               model_type=sys.argv[2],
                                               input_words=sys.argv[3],
                                               embedding_file=sys.argv[4])
    model_dir = sys.argv[5]

    train_data = load_data(sys.argv[6], words, config.grammar.dictionary,
                           reverse, config.grammar.tokens, config.max_length)
    pca_data = load_data(sys.argv[7], words, config.grammar.dictionary,
                         reverse, config.grammar.tokens, config.max_length)
    config.apply_cmdline(sys.argv[8:])

    print("unknown", unknown_tokens)

    # Tell TensorFlow that the model will be built into the default Graph.
    # (not required but good practice)
    with tf.Graph().as_default():
        # Build the model and add the variable initializer Op
        model.capture_final_encoder_state = True
        model.build()
        loader = tf.train.Saver()

        # Create a session for running Ops in the Graph
        with tf.Session() as sess:
            loader.restore(sess, os.path.join(model_dir, 'best'))

            inputs, input_lengths, _, _ = train_data

            final_encoder_state = None
            final_encoder_size = None
            if config.rnn_cell_type == 'lstm':
                final_encoder_state = tf.concat([
                    model.final_encoder_state[-1].c,
                    model.final_encoder_state[-1].h
                ], 1)
                final_encoder_size = 2 * config.hidden_size
            else:
                final_encoder_state = model.final_encoder_state[-1]
                final_encoder_size = config.hidden_size

            final_states_arrays = []
            # capture all the final encoder states
            for input_batch, input_length_batch in get_minibatches(
                [inputs, input_lengths], config.batch_size):
                feed_dict = model.create_feed_dict(input_batch,
                                                   input_length_batch)
                state_array = sess.run(final_encoder_state,
                                       feed_dict=feed_dict)
                #print state_array.shape
                final_states_arrays.append(state_array)

            X = np.concatenate(final_states_arrays, axis=0)
            assert X.shape == (len(inputs), final_encoder_size)
            X = tf.constant(X)

            mean = tf.reduce_mean(X, axis=0)
            centered_X = X - mean
            S, U, V = tf.svd(centered_X)

            # take only the top 2 components
            V = V[:2]
            V_array, mean_array = sess.run([V, mean])

            inputs, input_lengths, _, _ = pca_data

            X = final_encoder_state
            centered_X = X - tf.constant(mean_array)
            transformed_X = tf.matmul(centered_X, tf.constant(V_array.T))

            feed_dict = model.create_feed_dict(inputs, input_lengths)
            X_pca = sess.run(transformed_X, feed_dict=feed_dict)

            sentences = reconstruct_sentences(inputs, words['<<EOS>>'],
                                              reverse)
            show_pca(X_pca, sentences)
Example #32
import web
import time
import model
import json
from web import form

urls = (
    '/', 'index',
    '/trains/?', 'index',
    '/trains/(.+)', 'traintime'
)

render = web.template.render('templates')
app_config = json.load(open('gtfs.config'))
api_key = json.load(open('mta_api_key'))['api_key']
model.initialize(api_key, app_config['gtfs_directory'], app_config['cache_max_age'])

class index:
    def GET(self):
        return render.index(sorted(model.get_stops(), key=lambda stop:stop[1]))

    def POST(self):
        user_input = web.input(stations=[])
        raise web.seeother('/trains/' + ','.join(user_input.stations))

class traintime:
    def GET(self, train_stops):
        return render.arrivals(model.get_trains_for_stops(train_stops.split(',')))

class styles:
    def GET(self, style):
Example #33
def run():
    if len(sys.argv) < 6:
        print(
            "** Usage: python " + sys.argv[0] +
            " <<Benchmark: tt/geo>> <<Model: bagofwords/seq2seq>> <<Input Vocab>> <<Word Embeddings>> <<Model Directory>>"
        )
        sys.exit(1)

    np.random.seed(42)
    benchmark = sys.argv[1]
    config, words, reverse, model = initialize(benchmark=benchmark,
                                               model_type=sys.argv[2],
                                               input_words=sys.argv[3],
                                               embedding_file=sys.argv[4])
    model_dir = sys.argv[5]

    config.apply_cmdline(sys.argv[6:])

    print("unknown", unknown_tokens)

    histfile = ".history"
    try:
        readline.read_history_file(histfile)
        # default history len is -1 (infinite), which may grow unruly
        readline.set_history_length(1000)
    except IOError:
        pass
    atexit.register(readline.write_history_file, histfile)

    # Tell TensorFlow that the model will be built into the default Graph.
    # (not required but good practice)
    with tf.Graph().as_default():
        with tf.device('/cpu:0'):
            # Build the model and add the variable initializer Op
            model.capture_attention = True
            model.build()
            loader = tf.train.Saver()

            # Create a session for running Ops in the Graph
            with tf.Session() as sess:
                loader.restore(sess, os.path.join(model_dir, 'best'))

                try:
                    while True:
                        line = input('> ').strip()
                        if not line:
                            continue

                        input_vec, input_length = vectorize(
                            line, words, config.max_length)
                        fake_input, fake_length = vectorize(
                            'ig to fb', words, config.max_length)

                        feed = model.create_feed_dict(
                            [input_vec, fake_input],
                            [input_length, fake_length])
                        predictions, attention_scores = sess.run(
                            (model.pred, model.attention_scores),
                            feed_dict=feed)

                        assert len(predictions) == 2
                        assert len(attention_scores) == 2

                        decoded = list(
                            config.grammar.decode_output(predictions[0, 0]))
                        try:
                            decoded = decoded[
                                :decoded.index(config.grammar.end)]
                        except ValueError:
                            pass
                        output = [config.grammar.tokens[x] for x in decoded]

                        print(' '.join(output))

                        input_vec = [reverse[x]
                                     for x in input_vec[:input_length]]

                        show_heatmap(input_vec, output, attention_scores[0])
                except EOFError:
                    pass
Example #34
def setUpModule() -> None:
    pygame_mock.initialize_pygame()

    groups = Groups()
    timer = pygame_mock.MockTimer()
    model.initialize(groups, timer)
Example #35
test_data = gluon.data.DataLoader(test_dataset,
                                  batch_size=args.batch_size,
                                  sampler=contrib.data.IntervalSampler(
                                      len(test_dataset), nbatch_test),
                                  last_batch='discard')

###############################################################################
# Build the model
###############################################################################

ntokens = len(vocab)
model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid,
                       args.nlayers, args.dropout, args.tied)
if args.hybridize:
    model.hybridize(**hybridize_optional)
model.initialize(mx.init.Xavier(), ctx=context)

compression_params = None if args.gctype == 'none' else {
    'type': args.gctype,
    'threshold': args.gcthreshold
}
trainer = gluon.Trainer(model.collect_params(),
                        'sgd', {
                            'learning_rate': args.lr,
                            'momentum': 0,
                            'wd': 0
                        },
                        compression_params=compression_params)
loss = gluon.loss.SoftmaxCrossEntropyLoss()
if args.hybridize:
    loss.hybridize(**hybridize_optional)
Example #36
from flask import Flask, url_for
from flask_login import LoginManager

app = Flask(__name__)

app.config.from_object('config')

from app import views
import model

model.initialize()