Example #1
 def construct_model(self):
     self.q_net = self.build_net()
     self.model = Model(net=self.q_net,
                        loss=MSE(),
                        optimizer=RMSProp(self.args.lr))
     # Target network
     self.target_q_net = self.build_net()
Example #2
    def read(self, pb_path: str) -> Model:
        """Read TF file and load model.

        Parameters
        ----------
        pb_path : str
            Path to TF file

        Returns
        -------
        model : Model
            Loaded model

        """
        model = Model()

        # load tensorflow model
        graph_def = graph_pb2.GraphDef()
        try:
            with open(path.abspath(pb_path), "rb") as f:
                graph_def.ParseFromString(f.read())
        except IOError:
            print("Could not open file. Creating a new one.")

        # import graph
        model.graph = Importer.make_graph(graph_def)

        return model
Example #3
def main(_):
    pp.pprint(FLAGS.__flags)

    if FLAGS.threads < np.inf:
        sess_config = tf.ConfigProto(
            intra_op_parallelism_threads=FLAGS.threads)
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_mem

    else:
        sess_config = tf.ConfigProto()
    if 'mmd' in FLAGS.model:
        from core.model import MMD_GAN as Model
    elif FLAGS.model == 'wgan_gp':
        from core.wgan_gp import WGAN_GP as Model
    elif 'cramer' in FLAGS.model:
        from core.cramer import Cramer_GAN as Model

    with tf.Session(config=sess_config) as sess:
        if FLAGS.dataset == 'mnist':
            gan = Model(sess,
                        config=FLAGS,
                        batch_size=FLAGS.batch_size,
                        output_size=28,
                        c_dim=1,
                        data_dir=FLAGS.data_dir)
        elif FLAGS.dataset == 'cifar10':
            gan = Model(sess,
                        config=FLAGS,
                        batch_size=FLAGS.batch_size,
                        output_size=32,
                        c_dim=3,
                        data_dir=FLAGS.data_dir)
        elif FLAGS.dataset in ['celebA', 'lsun']:
            gan = Model(sess,
                        config=FLAGS,
                        batch_size=FLAGS.batch_size,
                        output_size=FLAGS.output_size,
                        c_dim=3,
                        data_dir=FLAGS.data_dir)
        else:
            gan = Model(sess,
                        batch_size=FLAGS.batch_size,
                        output_size=FLAGS.output_size,
                        c_dim=FLAGS.c_dim,
                        data_dir=FLAGS.data_dir)

        if FLAGS.is_train:
            gan.train()
        elif FLAGS.print_pca:
            gan.print_pca()
        elif FLAGS.visualize:
            gan.load_checkpoint()
            visualize(sess, gan, FLAGS, 2)
        else:
            gan.get_samples(FLAGS.no_of_samples, layers=[-1])

        if FLAGS.log:
            sys.stdout = gan.old_stdout
            gan.log_file.close()
        gan.sess.close()
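Examples #3 and #9 assume a TF 1.x FLAGS object that is defined elsewhere in the project. The following is a minimal, hypothetical sketch of how those flags could be declared with tf.app.flags; only the flag names are taken from the snippets, while every default value and help string is an illustrative assumption.

# Hypothetical flag declarations (TF 1.x). Flag names mirror the FLAGS fields
# used above; defaults and help strings are illustrative only.
import pprint
import numpy as np
import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_string('model', 'mmd', 'which model to train: mmd / wgan_gp / cramer')
flags.DEFINE_string('dataset', 'mnist', 'dataset name')
flags.DEFINE_string('data_dir', './data', 'where the dataset lives')
flags.DEFINE_integer('batch_size', 64, 'mini-batch size')
flags.DEFINE_integer('output_size', 64, 'image side length for datasets without a fixed size')
flags.DEFINE_integer('c_dim', 3, 'number of image channels')
flags.DEFINE_float('threads', np.inf, 'intra-op parallelism threads; np.inf means no limit')
flags.DEFINE_float('gpu_mem', 0.9, 'per-process GPU memory fraction')
flags.DEFINE_boolean('is_train', True, 'train instead of sampling')
flags.DEFINE_boolean('print_pca', False, 'print PCA diagnostics')
flags.DEFINE_boolean('visualize', False, 'load a checkpoint and visualize samples')
flags.DEFINE_integer('no_of_samples', 64, 'number of samples to draw')
flags.DEFINE_boolean('log', False, 'redirect stdout to a log file')
FLAGS = flags.FLAGS
pp = pprint.PrettyPrinter()

if __name__ == '__main__':
    tf.app.run()  # dispatches to main(_)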
Example #4
def test_import_file_updates_db_with_players():
    """
    End to end test asserting that a HTML file is parsed and imported into an
    in-memory SQLite database. 
    """
    model = Model(echo=False)

    # open the HTML file
    file = open(
        path.join(PROJECT_SOURCE_PATH, 'test', 'resources', 'squad.html'), 'r'
    )
    html = file.read()
    file.close()

    # parse the players
    players = parse_players(html)

    with model.session_scope() as session:

        # open a session and import our HTML file
        html_file = HTMLFile.from_file(
            path.join(PROJECT_SOURCE_PATH, 'test', 'resources', 'squad.html'))

        model.import_file(html_file)

        # retrieve the team from the database
        # and check that it has imported correctly
        new_team = session.query(Team).first()
        assert_equal(len(new_team.players), len(players))
Example #5
def test():
    # start timing
    start = time.clock()
    test_date = time.time()
    data_path = './data/processed/prt_mfd_7.csv'
    save_path = './test/purchase/'
    col_start = 1
    col_end = 4
    train_begin = 0
    train_end = 397
    time_step = 15
    rnn_unit = 10
    lr = 0.0006
    run_times = 100
    # ---------------------------------------------
    if not os.path.exists(save_path):
        os.makedirs(save_path)


    # ---------------------------------------------
    model = Model(save_path, test_date, data_path, col_start, col_end,
                  train_begin, train_end, time_step, rnn_unit, lr, run_times)
    test_y, test_predict = model.get_test_result()
    # -------------------------------
    end = time.clock()
    spend = end - start
    utils = Utils()
    test_score = utils.get_score(test_y, test_predict)
    describe = 'purchase(1308_1408,[purchase,redeem,mfd_7])'
    utils.save_test(test_y, test_predict, save_path, test_date, lr, run_times,
                    time_step, rnn_unit, spend, test_score, describe)
Example #6
def predict_prepare(data_x = None,model_path = None,config_file = 'web_flask/LSTM/config.json'):

    config_file = config_file
    configs = json.load(open(config_file, 'r'))

    '''
    data_loader = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns'],
         normalise_meth=configs['data']['normalise']
    )

    # use all of the data for prediction
    data_x,data_y = data_loader.get_all_data(configs['data']['sequence_length'],\
        normalise=configs['data']['normalise'])

    data_x = data_x[-1]
    '''
    model = Model()
    model_way = './web_flask/LSTM/saved_models/20052019-174244-e60.h5'
    model.load_model(model_way)

    predictions = model.predict_point_by_point(data_x)

    # print(predictions)

    ''' predicts one value every five minutes; this seems to be wrong
Example #7
def _parseModelNode(xmlSegments, xmlMaterials, lights, name, node):
    """
    parses a model node and creates a model instance.
    """
    
    params = {}
    
    segments = []
    sLights = []
    for child in list(node):
        if child.tag == "segment":
            segmentName = child.get('name')
            
            segment = loadXMLSegment(xmlSegments, segmentName)
            # load additional attributes into the datatype (rotation,translation,..)
            segment = _parseModelSegmentNode(xmlMaterials, lights, segment, child)
            segments.append(segment)
                
        elif child.tag == "state":
            xmlParseStateParam(child, params)
        
        elif child.tag == "light":
            name = child.get('name')
            for l in lights:
                if l.name==name:
                    sLights.append(l)
                    break
        else:
            print "WARNING: unknown model tag '%s'" % child.tag
    
    m = Model(segments=segments, params=params)
    m.setLights(sLights)
    
    return m
Example #8
 def __init__(self, texfilespath='.'):
     super().__init__()
     self.colors = ColorScheme()
     self.arrow = Arrow('midarrow')
     self.model = Model()
     self.path = os.path.join(texfilespath,
                              'tex-elements/').replace('\\', '/')
Example #9
def main(_):
    pp.pprint(vars(FLAGS))

    sess_config = tf.ConfigProto(
        device_count={"CPU": 3},
        inter_op_parallelism_threads=0,
        intra_op_parallelism_threads=0,
        allow_soft_placement=True)
    sess_config.gpu_options.allow_growth = True

    if FLAGS.model == 'mmd':
        from core.model import MMD_GAN as Model
    elif FLAGS.model == 'gan':
        from core.gan import GAN as Model
    elif FLAGS.model == 'wgan_gp':
        from core.wgan_gp import WGAN_GP as Model
    elif FLAGS.model == 'cramer':
        from core.cramer import Cramer_GAN as Model
    elif FLAGS.model == 'smmd':
        from core.smmd import SMMD as Model
    elif FLAGS.model == 'swgan':
        from core.smmd import SWGAN as Model
    else:
        raise ValueError("unknown model {}".format(FLAGS.model))

    #if FLAGS.multi_gpu:
    #    from core.model_multi_gpu import MMD_GAN as Model
    with tf.Session(config=sess_config) as sess:
        #sess = tf_debug.tf_debug.TensorBoardDebugWrapperSession(sess,'localhost:6064')
        #sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
        if FLAGS.dataset == 'mnist':
            gan = Model(sess, config=FLAGS, batch_size=FLAGS.batch_size, output_size=28, c_dim=1,
                        data_dir=FLAGS.data_dir)
        elif FLAGS.dataset == 'cifar10':
            gan = Model(sess, config=FLAGS, batch_size=FLAGS.batch_size, output_size=32, c_dim=3,
                        data_dir=FLAGS.data_dir)
        elif FLAGS.dataset in ['celebA', 'lsun', 'imagenet']:
            gan = Model(sess, config=FLAGS, batch_size=FLAGS.batch_size, output_size=FLAGS.output_size, c_dim=3,
                        data_dir=FLAGS.data_dir)
        else:
            gan = Model(
                sess, batch_size=FLAGS.batch_size,
                output_size=FLAGS.output_size, c_dim=FLAGS.c_dim,
                data_dir=FLAGS.data_dir)

        if FLAGS.is_train:
            gan.train()
            gan.pre_process_only()
        elif FLAGS.print_pca:
            gan.print_pca()
        elif FLAGS.visualize:
            gan.load_checkpoint()
            visualize(sess, gan, FLAGS, 2)
        else:
            gan.get_samples(FLAGS.no_of_samples, layers=[-1])

        if FLAGS.log:
            sys.stdout = gan.old_stdout
            gan.log_file.close()
        gan.sess.close()
Example #10
 def setUp(self):
     super().setUp()
     for i in range(20):
         Model.create()
     self.paginator = BaseCursorPagination(Model.id,
                                           settings.PAGINATE_BY,
                                           model=Model)
Example #11
def run():
    segmentation_model = Model(50, 50, 0.0009)
    get_batches = helper.gen_batch_function()
    with tf.Session() as sess:
        correct_label = tf.placeholder(
            tf.int32, [None, None, None, segmentation_model.no_of_classes],
            name='correct_label')
        input_image, keep_prob, last_layer = segmentation_model.create_model_graph(
            sess, cs.VGG_MODEL, correct_label)
        sess.run(tf.global_variables_initializer())
        for epoch in range(segmentation_model.no_of_epochs):
            print("EPOCH {} ....".format(epoch + 1))

            for image, label in get_batches(segmentation_model.batch_size):

                res, _, loss = sess.run([
                    last_layer, segmentation_model.train_op,
                    segmentation_model.cross_entropy_loss
                ],
                                        feed_dict={
                                            input_image: image,
                                            correct_label: label,
                                            keep_prob: 0.8
                                        })
                print("Loss: = {}".format(loss))
            print()

        run_inference_on_test(segmentation_model, sess, keep_prob, input_image)
Example #12
    def make_simple_model(self) -> Model:
        graph = Graph()

        # two inputs
        x = Input(
            'input',
            [1, 5, 5, 3],
            Float32(),
        )

        w = Constant(
            'weight',
            Float32(),
            np.zeros([1, 2, 2, 3]),
            dimension_format='NHWC',
        )

        # Conv
        conv = Conv('conv', [1, 4, 4, 1],
                    Float32(), {
                        'X': x,
                        'W': w
                    },
                    kernel_shape=[2, 2])

        # One output
        y = Output('output', [1, 4, 4, 1], Float32(), {'input': conv})

        # add ops to the graph
        graph.add_op_and_inputs(y)
        model = Model()
        model.graph = graph
        return model
Example #13
def train_network(configs, dataloader):

    # build model
    model = Model(configs['data']['input_mode'],
                  configs['data']['output_mode'])
    model.build_model(configs['model'])

    # in-memory training
    out_seq_len = configs['data']['input_sequence_length'] if configs['data'][
        'output_mode'] == "many_to_many" else 1
    x_train, y_train = dataloader.get_train_data(
        in_seq_len=configs['data']['input_sequence_length'],
        out_seq_len=out_seq_len)

    x_test, y_test = dataloader.get_test_data(
        in_seq_len=configs['data']['input_sequence_length'],
        out_seq_len=out_seq_len)

    history = model.train(
        x_train,
        y_train,
        x_test,
        y_test,
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        shuffle=configs['model']['shuffle_training_data'],
        allow_early_stop=configs['training']['allow_early_stop'],
    )

    return model, history
Example #14
def train(Model):
    """Train the model"""

    train_set, target, categoricals = utils.load_data(args.dataset,
                                                      args.file_name)
    dataset_train = Dataset(dataset=train_set,
                            categorical_indices=categoricals)
    target = dataset_train[target]
    dataset_train.drop(target, axis=1, inplace=True)
    (
        categorical_variables,
        non_categorical_variables,
    ) = dataset_train.get_variables_names()

    print("Creating cross products dataset")
    cross_products = dataset_train.cross_categorical_dataset()

    model = Model(categorical_variables, non_categorical_variables)
    model = model.build_model(cross_products)

    print("Training model")
    model.fit(
        [
            [
                train_set[categorical_variables],
                train_set[non_categorical_variables]
            ],
            cross_products,
        ],
        target,
        epochs=config.EPOCHS,
        validation_split=config.VALIDATION_SPLIT,
    )
Example #15
def gradient_check():
    tf.set_random_seed(1231)
    np.random.seed(1231)

    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("Add a config file using \'--config file_name.json\'")
        exit(1)

    # set logger
    path = os.path.dirname(os.path.abspath(__file__))
    path1 = os.path.join(path, 'core/model.py')
    path2 = os.path.join(path, 'core/train.py')
    logger = get_logger('log', logpath=config.summary_dir+'/',
                        filepath=os.path.abspath(__file__), package_files=[path1, path2])

    logger.info(config)

    batch_sizes = [1,4,16,32,64,128,256,512,1024]

    precon = False
    for bs in batch_sizes:
        start_time = time.time()
        print("processing batch size {}".format(bs))

        # load data
        train_loader, test_loader = load_pytorch(config)

        # define computational graph
        sess = tf.Session()

        model_ = Model(config, _INPUT_DIM[config.dataset], len(train_loader.dataset))
        trainer = Trainer(sess, model_, train_loader, test_loader, config, logger)

        trainer.grad_check(sess, bs, precon)
        print('batch size {} takes {} secs to finish'.format(
            bs, time.time()-start_time))
        tf.reset_default_graph()

    precon = True
    for bs in batch_sizes:
        start_time = time.time()
        print("processing batch size {}".format(bs))

        # load data
        train_loader, test_loader = load_pytorch(config)

        # define computational graph
        sess = tf.Session()

        model_ = Model(config, _INPUT_DIM[config.dataset], len(train_loader.dataset))
        trainer = Trainer(sess, model_, train_loader, test_loader, config, logger)

        trainer.grad_check(sess, bs, precon)
        print('batch size {} takes {} secs to finish'.format(
            bs, time.time()-start_time))
        tf.reset_default_graph()
Example #16
 def importRoad(self):
     model = Model()
     model.open(os.path.join(self.tempath, 'model_0.db'), 'w')
     importer = RoadImporter(model, self.mesh)
     for name in os.listdir(self.roadpath):
         if name.endswith('.txd'):
             importer.process(os.path.join(self.roadpath, name))
     model.close()
Example #17
 def __init__(self, dataset):
     # self.imagegen = ImageDataGenerator(shear_range=0.2,zoom_range=0.2,horizontal_flip=True)
     self.dataset = dataset
     self.train_x,self.train_y = dataset[0]
     self.test_x,self.test_y = dataset[1]
     self.learning_rate = 0.16
     self.eps = 2e-9
     self.params = {}
     self.model = Model()
Example #18
    def __init__(self, parent=None):
        super(ImportWindow, self).__init__(parent)

        # Manual import window instance.
        self.ui = ManualImportWindow()
        # Initialize manual import window.
        self.ui.setupUi(self)

        # Model instance.
        self.mongodb_obj = Model()
Example #19
 def exportAll(self):
     builder = AllDataBuilder(self.grid, self.rect)
     builder.open(os.path.join(self.dstpath, 'alldata'))
     for i in range(0, self.MAX_LEVEL_NUM):
         model = Model()
         model.open(os.path.join(self.tempath, 'model_%d.db' % (i+1)), 'r')
         builder.build(model, i)
         model.close()
     builder.save()
     builder.close()
Example #20
 def buildRoad(self):
     for i in range(self.MAX_LEVEL_NUM):
         src = Model()
         dst = Model()
         src.open(os.path.join(self.tempath, 'model_%d.db' % i), 'r')
         dst.open(os.path.join(self.tempath, 'model_%d.db' % (i+1)), 'w')
         builder = RoadBuilder(src, dst, self.mesh)
         builder.process(i)
         src.close()
         dst.close()
Example #21
    def __init__(self):
        Model.__init__(self)
        winSize = Config().attr.game.window.size

        #Initializing a (i, j) list
        lins = int(winSize[0]/16)
        cols = int(winSize[1]/16)
        Config().attr.game.habitat.size = (lins, cols)
        self.gridSize = (lins, cols)
        self.keepAlive = []
        self.grid = [[None] * cols for i in range(lins)]
Example #22
def evaluate(args):
    G = Model(net=mlp_G(), loss=None, optimizer=None)
    model_path = os.path.join(args.output_dir, args.model_name)
    print("Loading model from ", model_path)
    G.load(model_path)
    noise = get_noise(size=(128, args.nz))
    samples = G.forward(noise)

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    save_path = os.path.join(args.output_dir, "evaluate.png")
    save_batch_as_images(save_path, samples)
Example #23
def predict(test):
    # initialize dataLoader with split of 0

    cleaner.main_func()

    data = DataLoader(test, 0, configs['data']['columns'])
    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'], normalise=False)
    model = Model()
    model.load_model('saved_models/tracker.h5')
    predictions = model.predict_point_by_point(x_test)
    plot_results(predictions, y_test)
    return "OK"
Example #24
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']): os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    if not configs['training']['train']:
        model.load_model(filepath='saved_models/02102019-164727-e2.h5')
    else:
        model.train(
            x,
            y,
            epochs=configs['training']['epochs'],
            batch_size=configs['training']['batch_size'],
            save_dir=configs['model']['save_dir']
        )
    # out-of memory generative training
    # steps_per_epoch = math.ceil(
    #     (data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    # model.train_generator(
    #     data_gen=data.generate_train_batch(
    #         seq_len=configs['data']['sequence_length'],
    #         batch_size=configs['training']['batch_size'],
    #         normalise=configs['data']['normalise']
    #     ),
    #     epochs=configs['training']['epochs'],
    #     batch_size=configs['training']['batch_size'],
    #     steps_per_epoch=steps_per_epoch,
    #     save_dir=configs['model']['save_dir']
    # )

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'],
    #                                                configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])

    predictions = model.predict_point_by_point(x_test)
    plot_results(predictions, y_test)
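Several of the examples here (#24 to #27, #31, #32 and #39) read the same kind of config.json before building the Keras LSTM. A hypothetical config covering only the keys these snippets actually access is sketched below; the filename, columns and numbers are illustrative, and model.build_model(configs) will typically also expect model-level entries (loss, optimizer, layer definitions) that are not visible in these excerpts.

{
    "data": {
        "filename": "sp500.csv",
        "columns": ["Close"],
        "sequence_length": 50,
        "train_test_split": 0.85,
        "normalise": true
    },
    "training": {
        "train": true,
        "epochs": 2,
        "batch_size": 32
    },
    "model": {
        "save_dir": "saved_models"
    }
}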
Example #25
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataProcessor(os.path.join('data', configs['data']['filename']),
                         configs['data']['train_test_split'],
                         configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])

    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=".")

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    predictions_pointbypoint = model.predict_point_by_point(x_test)
    plot_results(predictions_pointbypoint, y_test)

    predictions_fullseq = model.predict_sequence_full(
        x_test, configs['data']['sequence_length'])
    plot_results(predictions_fullseq, y_test)
Example #26
def main(train_after=False):
    config_file = 'web_flask/LSTM/config.json'
    configs = json.load(open(config_file, 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(configs['data']['filename'],
                      configs['data']['train_test_split'],
                      configs['data']['columns'],
                      normalise_meth=configs['data']['normalise'])

    model = Model()
    model.build_model(configs) if not train_after else \
        model.load_model(os.path.join( configs['model']['save_dir'],configs['model']['model_name']))
    history = LossHistory()

    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'],
                history=history,
                x_test=x_test,
                y_test=y_test)
    '''
    # out-of memory generative training
    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']
        ),
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        steps_per_epoch=steps_per_epoch,
        save_dir=configs['model']['save_dir']
    )

    '''

    history.loss_plot('epoch')
    #loss, accuracy = model.model.evaluate(x_test, y_test)
    #print(loss,accuracy)

    #predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    #predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x[0])  #_test)

    #plot_results_multiple(predictions, y, configs['data']['sequence_length'])
    plot_results(predictions, y)
Example #27
def main(model_name=None):
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()

    if model_name:
        saved_model = os.path.join(configs['model']['save_dir'], model_name)
        model.load_model(saved_model)
    else:
        model.build_model(configs)

    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    '''
        # in-memory training
        model.train(
            x,
            y,
            epochs = configs['training']['epochs'],
            batch_size = configs['training']['batch_size'],
            save_dir = configs['model']['save_dir']
        )
    '''

    # out-of memory generative training
    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
    if not model_name:
        model.train_generator(data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']),
                              epochs=configs['training']['epochs'],
                              batch_size=configs['training']['batch_size'],
                              steps_per_epoch=steps_per_epoch,
                              save_dir=configs['model']['save_dir'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    predictions = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    plot_results_multiple(predictions, y_test,
                          configs['data']['sequence_length'])
Example #28
def main():
    tf.set_random_seed(1231)
    np.random.seed(1231)

    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("Add a config file using \'--config file_name.json\'")
        exit(1)

    makedirs(config.summary_dir)
    makedirs(config.checkpoint_dir)

    # set logger
    path = os.path.dirname(os.path.abspath(__file__))
    path1 = os.path.join(path, 'core/model.py')
    path2 = os.path.join(path, 'core/train.py')
    logger = get_logger('log', logpath=config.summary_dir+'/',
                        filepath=os.path.abspath(__file__), package_files=[path1, path2])

    logger.info(config)

    # load data
    train_loader, test_loader = load_pytorch(config)

    # define computational graph
    sess = tf.Session()

    model_ = Model(config, _INPUT_DIM[config.dataset], len(train_loader.dataset))
    trainer = Trainer(sess, model_, train_loader, test_loader, config, logger)

    trainer.train()
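In Examples #15 and #28 the config object returned by process_config(args.config) is read only through a few attributes (dataset, summary_dir, checkpoint_dir). A hypothetical file_name.json covering just those fields could look like the sketch below; any further options the Model or Trainer constructors need are not visible in these excerpts.

{
    "dataset": "mnist",
    "summary_dir": "./experiments/mnist/summary/",
    "checkpoint_dir": "./experiments/mnist/checkpoint/"
}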
Example #29
File: run.py Project: t-k-/nbnp
def main(args):
    if args.seed >= 0:
        random_seed(args.seed)

    # create output directory for saving result images
    if not os.path.exists('./output'): os.mkdir('./output')

    # define network we are going to load
    net = Net([
        Conv2D(kernel=[5, 5, 1, 6], stride=[1, 1], padding="SAME"),
        ReLU(),
        MaxPool2D(pool_size=[2, 2], stride=[2, 2]),
        Conv2D(kernel=[5, 5, 6, 16], stride=[1, 1], padding="SAME"),
        ReLU(),
        MaxPool2D(pool_size=[2, 2], stride=[2, 2]),
        Flatten(),
        Dense(120),
        ReLU(),
        Dense(84),
        ReLU(),
        Dense(10)
    ])

    # load the model
    model = Model(net=net, loss=SoftmaxCrossEntropyLoss(), optimizer=Adam())
    print('loading pre-trained model file', args.model_path)
    model.load(args.model_path)

    # create pyplot window for on-the-fly visualization
    img = np.ones((1, 28, 28, 1))
    fig = disp_mnist_batch(img)

    # actual visualization generations

    layer_name = 'conv-layer-1'
    print('[ ' + layer_name + ' ]')
    images = am_visualize_conv_layer(model, 0, fig)
    save_batch_as_images('output/{}.png'.format(layer_name),
                         images,
                         title='visualized feature maps for ' + layer_name)

    layer_name = 'conv-layer-2'
    print('[ ' + layer_name + ' ]')
    images = am_visualize_conv_layer(model, 3, fig)
    save_batch_as_images('output/{}.png'.format(layer_name),
                         images,
                         title='visualized feature maps for ' + layer_name)
Example #30
    def __init__(self, parent=None):
        super(MainForm, self).__init__(parent)

        # Main window instance.
        self.ui = Ui_MainWindow()
        # Initialize the main window user interface.
        self.ui.setupUi(self)

        # Model instance.
        self.mongodb_obj = Model()

        # Initialize in advance.
        for i in range(1, 5):
            self._init_all(i)

        # Video player instance.
        self.video_window = VideoWindow()
Example #31
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('../data', configs['data']['filename']),
                      os.path.join('../data', configs['data']['VIMfile']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size'],
		save_dir = configs['model']['save_dir']
	)

    '''
    # Out-of memory generative training
    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
    model.train_generator(data_gen=data.generate_train_batch(
        seq_len=configs['data']['sequence_length'],
        batch_size=configs['training']['batch_size'],
        normalise=configs['data']['normalise']),
                          epochs=configs['training']['epochs'],
                          batch_size=configs['training']['batch_size'],
                          steps_per_epoch=steps_per_epoch,
                          save_dir=configs['model']['save_dir'])

    x_test, y_test, p0_vec = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    #predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x_test)
    pred = predictions.reshape((predictions.size, 1))

    #plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    #plot_results(pred, y_test) #normalised predictions

    # De-normalise & plot
    p_pred, p_true = denorm_transform(p0_vec, pred, y_test)
    plot_results(p_pred, p_true)  #de-normalised, i.e., original fex units

    # Compute evaluation metrics
    assess = EvalMetrics(p_true, p_pred)
    MAE = assess.get_MAE()
    RMSE = assess.get_RMSE()
    print("MAE on validation set is: %f" % MAE)
    print("RMSE on validation set is: %f" % RMSE)
Example #32
def predict():
    configs = json.load(open(CONFIG, 'r'))

    data = DataLoader(DATA, configs['data']['train_test_split'],
                      configs['data']['columns'])

    global model
    if model is None:
        model = Model()
        model.load_model(MODEL)

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    if TYPE == "sequence":
        predictions = model.predict_sequences_multiple(
            x_test, configs['data']['sequence_length'],
            configs['data']['sequence_length'])
        plot_results_multiple(predictions, y_test,
                              configs['data']['sequence_length'])
    if TYPE == "point" or TYPE == "predict":
        predictions = model.predict_point_by_point(x_test)
    if TYPE == "full":
        predictions = model.predict_sequence_full(
            x_test, configs['data']['sequence_length'])
    if TYPE == "full" or TYPE == "point":
        plot_results(predictions, y_test)
    if TYPE == "predict":
        predicted_value = data.denormalize_windows(
            predictions[-1], configs['data']['sequence_length'])
        sys.stdout.write("--END--{}--END--\n".format(predicted_value))
    else:
        sys.stdout.write("--END--")
Example #33
def initialisation():
    global beta, GLOBAL_REF_MODEL
    dataset_path = '.\\dataset\\*.txt'
    models = []
    for file_name in glob.glob(dataset_path):
        models.append(Corpus(path_to_corpus=file_name).freq_result)
    m = Model(*models)
    beta = m.beta
    GLOBAL_REF_MODEL = m.global_ref_model
Example #34
    async def test_pagination_items_per_page(self):
        items_per_page = 2
        paginator = BaseCursorPagination(Model.id, items_per_page, model=Model)
        paginator.model = Model
        queryset = Model.select().order_by(Model.id)
        queryset = paginator.first(queryset, None, None)
        result = queryset.execute()

        self.assertEqual(len(result), items_per_page)
Example #35
    async def test_pagination_first_page(self):
        queryset = Model.select().order_by(Model.id)
        queryset = self.paginator.first(queryset, None, None)
        result = queryset.execute()
        ids = [i.id for i in result]

        self.assertEqual(len(result), settings.PAGINATE_BY)
        self.assertIn(1, ids)
        self.assertIn(settings.PAGINATE_BY, ids)
Example #36
 def importBack(self):
     modelist = []
     for i in range(self.MAX_LEVEL_NUM):
         model = Model()
         model.open(os.path.join(self.tempath, 'model_%d.db' % (i+1)), 'a')
         modelist.append(model)
     importer = BackImporter(modelist, self.grid)
     # back_0
     for name in os.listdir(self.backpath_0):
         if name.endswith('.txd'):
             importer.process(os.path.join(self.backpath_0, name), 0, 2)
     for name in os.listdir(self.backpath_2):
         if name.endswith('.txd'):
             importer.process(os.path.join(self.backpath_2, name), 2, 4)
     for name in os.listdir(self.backpath_4):
         if name.endswith('.txd'):
             importer.process(os.path.join(self.backpath_4, name), 4, 6)
     for model in modelist:
         model.close()
Example #37
def cnn_model():
    net = Net([
        Conv2D(kernel=[3, 3, 1, 2]),
        MaxPool2D(pool_size=[2, 2], stride=[2, 2]),
        Conv2D(kernel=[3, 3, 2, 4]),
        MaxPool2D(pool_size=[2, 2], stride=[2, 2]),
        Flatten(),
        Dense(1)
    ])
    return Model(net, loss=MSE(), optimizer=SGD())
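A hypothetical way to exercise cnn_model(): the forward() call mirrors the Model API visible in Example #22 from the same library, while the batch shape and size are assumptions.

import numpy as np

model = cnn_model()
dummy_batch = np.random.randn(8, 28, 28, 1)  # NHWC batch; shape is an assumption
preds = model.forward(dummy_batch)           # forward() as used in Example #22
print(preds.shape)                           # expected (8, 1) after Flatten() and Dense(1)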
Example #38
 def to_dict(self):
     self.planProgress = self.get_plan_progress()
     self.creatorDisplayName = user.get_user_display_name(self.creator_id)
     from contact.contactservice import ContactService
     self.taskAssigneeName = ContactService.get_instance().get_contact_name(self.assignee_id)
     from task.taskservice import TaskService
     self.componentKeys = map(lambda x: x.component_id, TaskService.get_instance().fetch_taskcomponents(self.key()))
     self.affectedVersionKeys = map(lambda x: x.version_id, TaskService.get_instance().fetch_taskversions(self.key(), True))
     self.fixedVersionKeys = map(lambda x: x.version_id, TaskService.get_instance().fetch_taskversions(self.key(), False))
     
     tdt = Model.to_dict(self)
     tdt['storyPoints'] = "%0.1f" % self.story_points if self.story_points is not None else ""
     return tdt
Example #39
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']): os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size'],
		save_dir = configs['model']['save_dir']
	)
	'''
    # out-of memory generative training
    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']
        ),
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        steps_per_epoch=steps_per_epoch,
        save_dir=configs['model']['save_dir']
    )

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    # predictions = model.predict_point_by_point(x_test)

    plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
Example #40
    def setUp(self):
        """ Sets up tests and starts mock patchers """

        # mock open(file) and os.path.getmtime(file)
        patcher_open = patch('builtins.open')
        patcher_mtime = patch('os.path.getmtime')
        self.patchers = [patcher_open, patcher_mtime]
        self.addCleanup(self.cleanUp)

        self.mock_open = patcher_open.start()
        self.mock_open.return_value.__enter__ = lambda s: s
        self.mock_open.return_value.__exit__ = mock.Mock()
        self.mock_open.return_value.read.return_value = '<html><title>Battrick - Squad</title></html>'

        self.mock_mtime = patcher_mtime.start()
        self.test_date = datetime(2015, 1, 1, 12, 0, 10)

        self.mock_session = mock.Mock(spec=Session)

        self.mock_mtime.return_value = mktime(
            self.test_date.timetuple()) + 1e-6 * self.test_date.microsecond

        self.model = Model(echo=False)
Example #41
 def __init__(self):
     Model.__init__(self)
     self.habitat = Habitat()
     self.state = Game.STATE_PREPARING
Example #42
class TestModel(TestCase):
    """ Tests the model class. """

    def cleanUp(self):
        """ Stops all unittest.mock patch objects """
        for patcher in self.patchers:
            patcher.stop()

    def setUp(self):
        """ Sets up tests and starts mock patchers """

        # mock open(file) and os.path.getmtime(file)
        patcher_open = patch('builtins.open')
        patcher_mtime = patch('os.path.getmtime')
        self.patchers = [patcher_open, patcher_mtime]
        self.addCleanup(self.cleanUp)

        self.mock_open = patcher_open.start()
        self.mock_open.return_value.__enter__ = lambda s: s
        self.mock_open.return_value.__exit__ = mock.Mock()
        self.mock_open.return_value.read.return_value = '<html><title>Battrick - Squad</title></html>'

        self.mock_mtime = patcher_mtime.start()
        self.test_date = datetime(2015, 1, 1, 12, 0, 10)

        self.mock_session = mock.Mock(spec=Session)

        self.mock_mtime.return_value = mktime(
            self.test_date.timetuple()) + 1e-6 * self.test_date.microsecond

        self.model = Model(echo=False)

    @mock.patch('core.model.parse_players')
    @mock.patch('sqlalchemy.orm.query.Query.count')
    @mock.patch('sqlalchemy.orm.query.Query.first')
    @mock.patch('sqlalchemy.orm.session.Session.add')
    @mock.patch('core.model.parse_team_id')
    def test_import_file_calls_parse_players_if_squad_file(self, mock_parse_team_id, mock_add, mock_first, mock_count, mock_parse_players):
        """ Tests that parse_players is called if the HTML file is a Squad file """
        html_file = HTMLFile()
        html_file.type = PageTypes.Squad.value
        html_file.HTML = 'some HTML'

        mock_parse_players.return_value = []
        mock_parse_team_id.return_value = 3000

        team = Team()
        mock_first.result = team

        self.model.import_file(html_file)
        mock_parse_players.assert_called_with(html_file.HTML)

    @mock.patch('core.model.parse_pavilion')
    @mock.patch('sqlalchemy.orm.query.Query.first')
    @mock.patch('core.model.parse_team_id')
    @mock.patch('core.PyBatBase.Team.add_ranking')
    def test_import_file_calls_parse_pavilion_if_pavilion_file(self, mock_ranking,mock_parse_team_id, mock_first, mock_parse_pavilion):
        html_file = HTMLFile()
        html_file.type = PageTypes.Pavilion.value
        html_file.HTML = 'some HTML'
        

        mock_parse_pavilion.return_value = []
        mock_parse_team_id.return_value = 3000

        team = Team()
        team.rankings.append = mock.Mock()

        mock_first.result = team
        self.model.import_file(html_file)

        assert mock_parse_team_id.called

    @mock.patch('core.model.parse_team_id')
    @mock.patch('sqlalchemy.orm.session.Session.query')
    @mock.patch('sqlalchemy.orm.session.Session.add')
    def test_import_file_retrieves_team_from_file(self, mock_add, mock_query, mock_parse_team_id):

        test_id = 49800

        team = Team()

        html_file = HTMLFile()
        html_file.HTML = 'Some HTML'

        mock_query.return_value.filter_by = mock.Mock()

        mock_parse_team_id.return_value = test_id
        mock_result = mock.Mock(spec=Query)
        mock_result.count = mock.Mock()
        mock_result.count.return_value = 1

        mock_query.return_value.filter_by.return_value = mock_result

        self.model.import_file(html_file)

        mock_query.assert_called_with(Team)
        assert mock_parse_team_id.called
        mock_query.return_value.filter_by.assert_called_with(id=test_id)

    @mock.patch('sqlalchemy.orm.session.Session.query')
    def test_has_teams_returns_false_if_no_teams(self, mock_query):

        mock_query.return_value.count.return_value = 0

        result = self.model.has_teams()

        assert_false(result)

    @mock.patch('sqlalchemy.orm.session.Session.query')
    def test_has_teams_returns_true_if_teams(self, mock_query):

        mock_query.return_value.count.return_value = 1

        result = self.model.has_teams()

        assert_true(result)
Example #43
 def to_dict(self):
     self.creatorDisplayName = user.get_user_display_name(self.creator_id)
     from contact.contactservice import ContactService
     self.groupName = ContactService.get_instance().get_group_name(self.group_id)
     return Model.to_dict(self)
Example #44
 def to_dict(self):
     from contact.contactservice import ContactService
     self.contactName = ContactService.get_instance().get_contact_name(self.contact_id)
     return Model.to_dict(self)
Example #45
 def to_dict(self):
     self.creatorDisplayName = user.get_user_display_name(self.creator_id)
     return Model.to_dict(self)
Example #46
 def to_dict(self):
     self.bind_user_account = user.get_user(user_id=self.bind_user_id).u_account if self.bind_user_id != None and self.bind_user_id != model.EMPTY_UID else ""
     self.creatorDisplayName = user.get_user_display_name(self.creator_id)
     from contact.contactservice import ContactService
     self.groupKeys = list(set(map(lambda x: x.group_id, ContactService.get_instance().fetch_contactgroups(contact_id=self.key()))))
     return Model.to_dict(self)