Example #1
    def train(self, token_lists, model_file):
        self.total = 0
        self.correct = 0
        self.incorrect = 0
        self.top2 = 0
        self.top3 = 0
        self.prefix_model_file = "./trained_model/prefix_final/prefix.tfl"
        self.suffix_model_file = "./trained_model/suffix_final/suffix.tfl"
        self.prepare_data(token_lists)
        # xs = xs[:40960]
        # ys = ys[:40960]

        (pxs, pys, sxs, sys) = self.getTrainData(token_lists)
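        # (pxs, pys) are the inputs/targets for the prefix model below; (sxs, sys) feed the suffix model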

        with tf.Graph().as_default():
            self.prefix_model = Models().create_network(
                len(self.string_to_number),
                len(self.string_to_number),
                model_name="bidirectional_attention_rnn",
                in_seq_len=self.in_seq_len,
                out_seq_len=self.out_seq_len,
                num_layers=2,
                memory_size=64,
                embedding_size=128,
                num_heads=8,
                scope="prefix")

            self.prefix_model.load(self.prefix_model_file)
            self.prefix_model.fit(pxs,
                                  pys,
                                  n_epoch=1,
                                  batch_size=512,
                                  shuffle=True,
                                  show_metric=False,
                                  run_id="Prefix Completion")
            self.prefix_model.save(self.prefix_model_file)

        with tf.Graph().as_default():
            self.suffix_model = Models().create_network(
                len(self.string_to_number),
                len(self.string_to_number),
                model_name="bidirectional_attention_rnn",
                in_seq_len=self.in_seq_len,
                out_seq_len=self.out_seq_len,
                num_layers=2,
                memory_size=64,
                embedding_size=128,
                num_heads=8,
                scope="suffix")
            self.suffix_model.load(self.suffix_model_file)
            self.suffix_model.fit(sxs,
                                  sys,
                                  n_epoch=1,
                                  batch_size=512,
                                  shuffle=True,
                                  show_metric=False,
                                  run_id="Suffix Completion")
            self.suffix_model.save(self.suffix_model_file)
Example #2
    def cross_test(self, rfclf, feature_models, evalTag=True):
        # load data
        [tt_idx, x_te, y_te] = np.load(self.data_file_test)
        if self.model_dict['lrTModel']:
            [x_te_title, y_te] = np.load(self.data_file_title_test)
        if self.model_dict['scoreModel']:
            [x_te_score] = np.load(self.data_file_score_test)
        if self.model_dict['scoreTModel']:
            [x_te_score_title] = np.load(self.data_file_score_title_test)
        if self.model_dict['dictModel']:
            dic_result = np.array(np.load(self.dict_result)[0])[tt_idx]
#            dic_result[dic_result > 0] = 1
#            dic_result[dic_result < 0] = -1

        #get features
        n_features = []

        for key in self.model_dict.keys():
            if self.model_dict[key]:
                x_test = x_te
                if key == 'dictModel':
                    n_features.append(dic_result)
                    continue
                elif key == 'scoreModel':
                    n_features.append(Models().select_demo(key, 0, 0).predict(x_te_score))
                    continue
                elif key == 'scoreTModel':
                    n_features.append(Models().select_demo(key, 0, 0).predict(x_te_score_title))
                    continue
                elif key == 'lrTModel':
                    x_test = x_te_title

                if key == 'lrModel' or key == 'lrTModel':
                    n_features.append(feature_models[key].predict_proba(x_test))
                else:
                    n_features.append(feature_models[key].predict(x_test))

        #start the second RF model
        x_second_te = np.column_stack(tuple(n_features))
        rf_result = rfclf.predict(x_second_te)
        rf_result_proba = rfclf.predict_proba(x_second_te)

        # treat low-confidence predictions (max class probability < 0.5) as 0
        rf_rp = np.copy(rf_result)
        rf_rp[rf_result_proba.max(axis=1) < 0.5] = 0

        if evalTag:
            self.evaluate(rf_result, y_te)

#        self.DT.write_data('../data/negative.xls', self.origin_data_file,
#                          tt_idx[rf_result == -1], y_te[rf_result == -1])
#        self.DT.write_data('../data/positive.xls', self.origin_data_file,
#                          tt_idx[rf_result == 1], y_te[rf_result == 1])
#        self.DT.write_data('../data/zeros.xls', self.origin_data_file,
#                          tt_idx[rf_result == 0], y_te[rf_result == 0])
        return rf_result, y_te, rf_rp
Example #3
    def predict(self, pred_file=None):
        # load models
        rfclf = joblib.load(self.rfmodel_file)

        feature_models = dict()
        disable_models = ['dictModel', 'scoreModel', 'scoreTModel']
        for key in self.model_dict.keys():
            if self.model_dict[key] and (key not in disable_models):
                clf = joblib.load(self.model_dir + '/' + key)
                feature_models.setdefault(key, clf)
        
        if pred_file is not None:
            test_data = self.pre_data_treate(pred_file)
            [x_te, x_te_title, x_te_score, x_te_score_title] = test_data
            
            #get features
            n_features = []
    
            for key in self.model_dict.keys():
                if self.model_dict[key]:
                    x_test = x_te
                    if key == 'scoreModel':
                        n_features.append(Models().select_demo(key, 0, 0).predict(x_te_score))
                        continue
                    elif key == 'scoreTModel':
                        n_features.append(Models().select_demo(key, 0, 0).predict(x_te_score_title))
                        continue
                    elif key == 'lrTModel':
                        x_test = x_te_title
    
                    if key == 'lrModel' or key == 'lrTModel':
                        n_features.append(feature_models[key].predict_proba(x_test))
                    else:
                        n_features.append(feature_models[key].predict(x_test))
            
            #start the second RF model
            x_second_te = np.column_stack(tuple(n_features))
            rf_result = rfclf.predict(x_second_te)
            rf_result_proba = rfclf.predict_proba(x_second_te)
    
            # treat low-confidence predictions (max class probability < 0.5) as 0
            rf_rp = np.copy(rf_result)
            rf_rp[rf_result_proba.max(axis=1) < 0.5] = 0
    
            self.DT.write_data('../data/negative.xls', pred_file,
                              range(rf_result.shape[0]), rf_result)
            self.DT.write_data('../data/negative1.xls', pred_file,
                              range(rf_result.shape[0]), rf_rp)
        else:
            return self.cross_test(rfclf, feature_models)
Example #4
    def initialise(self, param):
        self.parameters = param

        if param.get('goal_selection_mode') != 'som':
            print('wrong goal selection mode, exit!')
            sys.exit(1)

        self.intrinsic_motivation = IntrinsicMotivation(param)

        if (self.parameters.get('train_cae_offline')) or (
                self.parameters.get('train_som_offline')):
            self.models = Models(param, train_images=self.train_images)
        else:
            self.models = Models(param)

        plot_encoded = self.models.encoder.predict(
            np.asarray([self.test_images[0:5]
                        ]).reshape(5, self.parameters.get('image_size'),
                                   self.parameters.get('image_size'),
                                   self.parameters.get('image_channels')))
        plots.plots_cae_decoded(
            self.models.decoder,
            plot_encoded,
            self.test_images[0:5],
            image_size=self.parameters.get('image_size'),
            directory=self.parameters.get('directory_pretrained_models'))

        self.experiment_id = param.get('experiment_id')
        self.iteration = 0

        self.pos = []
        self.cmd = []
        self.img = []

        self.goal_code = []

        self.current_goal_x = -1
        self.current_goal_y = -1
        self.current_goal_idx = -1
        self.prev_goal_idx = -1

        self.goal_image = np.zeros(
            (1, param.get('image_size'), param.get('image_size'),
             param.get('image_channels')), np.float32)

        np.random.seed()  # change the seed

        self.prev_pos = self.get_starting_pos()
Example #5
    def train(self, token_lists, model_file):
        self.model_file = "./trained_model/random3/randomhole.tfl"
        self.prepare_data(token_lists)

        xs, ys = self.getTrainData(token_lists)

        with tf.Graph().as_default():
            self.model = Models().create_network(
                self.in_max_int,
                self.out_max_int,
                model_name="bidirectional_attention_rnn",
                in_seq_len=self.in_seq_len,
                out_seq_len=self.out_seq_len,
                num_layers=2,
                memory_size=64,
                embedding_size=128,
                num_heads=6,
                scope="randomhole")
            self.model.load(self.model_file)
            self.model.fit(xs,
                           ys,
                           n_epoch=1,
                           batch_size=256,
                           shuffle=True,
                           show_metric=False,
                           run_id="Random Hole Completion")
            self.model.save(self.model_file)
Example #6
def evaluate_model(trainX, trainy, testX, testy):
    epochs, batch_size = 15, 64
    verbose, n_steps, n_length = 0, 4, 32
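    # for the cnnlstm/convlstm variants each window is split into n_steps sub-sequences
    # of n_length steps, so n_steps * n_length must equal the original timestep count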
    n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[
        2], trainy.shape[1]
    model = 'lstm'
    if model == 'cnnlstm':
        trainX = trainX.reshape(
            (trainX.shape[0], n_steps, n_length, n_features))
        testX = testX.reshape((testX.shape[0], n_steps, n_length, n_features))
    elif model == 'convlstm':
        trainX = trainX.reshape(
            (trainX.shape[0], n_steps, 1, n_length, n_features))
        testX = testX.reshape(
            (testX.shape[0], n_steps, 1, n_length, n_features))
    model = Models(model, n_timesteps, n_features, n_outputs, n_steps,
                   n_length)
    model.model.fit(trainX,
                    trainy,
                    epochs=epochs,
                    batch_size=batch_size,
                    verbose=verbose)
    _, accuracy = model.model.evaluate(testX,
                                       testy,
                                       batch_size=batch_size,
                                       verbose=verbose)
    print('Accuracy: {:.4f}'.format(accuracy))
Example #7
def list_identifiers(status=[], first=0, max_size=0, order_by=None):
    db = get_db_instance()
    models = Models(db)
    model_identifiers = models.getIdentifiers()
    items = model_identifiers.all(status=status,
                                  skip=first,
                                  limit=max_size,
                                  order_by=('updated', 1))
    i = 0

    def show(x, alt, f=lambda a: a):
        return f(x) if x is not None else alt

    def isodate(t):
        return datetime.datetime.fromtimestamp(t).isoformat()

    for item in items:
        ean = show(item.ean, '-')
        asin = show(item.asin, '-')
        status = show(item.status, '-', str)
        updated = show(item.updated, '-', isodate)
        print(
            '#{i}\tasin:{asin}\tean:{ean}\tstatus:{status}\tupdated:{updated}'.
            format(i=i, asin=asin, ean=ean, status=status, updated=updated))
        i += 1
Example #8
    def define_model(self, num_chan):
        models = Models(input_shape=(self.height, self.width, num_chan + 1*flag_expand_chan),
                        classes=self.num_classes)
        if self.base_model == 'vgg16':
            models.vgg16()
        elif self.base_model == 'vgg19':
            models.vgg19()
        elif self.base_model == 'resnet50':
            models.resnet50()
        elif self.base_model == 'inceptionV3':
            models.inceptionV3()
        elif self.base_model == 'xception':
            models.xception()
        elif self.base_model == 'simple':
            models.simple()  # TODO
        elif self.base_model == 'simple_resnet':
            models.simple_resnet()
        elif self.base_model == 'pspnet':
            models.simple_pspnet()
        elif self.base_model == 'simple_cascade_atrous':
            models.simple_cascade_atrous()
        elif self.base_model == 'simple_parallel_atrous':
            models.simple_parallel_atrous()
        else:
            print('Unknown base model')
            raise SystemExit

        # models.compile(optimizer=RMSprop(lr=1e-3))

        # models.compile(optimizer=Adam()) # TODO
        sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
        models.compile(optimizer=sgd)
        self.model = models.get_model()
Example #9
    def initialise(self, param):
        self.parameters = param
        self.intrinsic_motivation = IntrinsicMotivation(param)
        self.models = Models(param)

        self.exp_iteration = param.get('exp_iteration')
        self.iteration = 0

        self.pos = []
        self.cmd = []
        self.img = []

        self.goal_code = []

        self.current_goal_x = -1
        self.current_goal_y = -1
        self.current_goal_idx = -1
        self.prev_goal_idx = -1

        self.goal_image = np.zeros(
            (1, param.get('image_size'), param.get('image_size'),
             param.get('image_channels')), np.float32)

        np.random.seed()  # change the seed

        self.prev_pos = self.get_starting_pos()
Example #10
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default='configs/OT3+++R.yaml', help='Path to the config file.')
    parser.add_argument('--output_path', type=str, default='.', help="outputs path")
    parser.add_argument("--resume",default='True', action="store_true") #change to True is you need to retrain from pre-train model
    opts = parser.parse_args()

    cudnn.benchmark = True

    # Load experiment setting
    config = get_config(opts.config)

    # dataset set up
    dataset = My3DDataset(opts=config)
    train_loader = DataLoader(dataset=dataset, batch_size=config['batch_size'], shuffle=True, num_workers=config['nThreads'])


    config['vgg_model_path'] = opts.output_path

    trainer = Models(config)
    trainer.cuda()

    # Setup logger and output folders
    model_name = os.path.splitext(os.path.basename(opts.config))[0]
    output_directory = os.path.join(opts.output_path + "/outputs", model_name)
    train_writer = tensorboardX.SummaryWriter(os.path.join(opts.output_path + "/outputs/logs", model_name))
    checkpoint_directory, image_directory = prepare_sub_folder(output_directory)
    shutil.copy(opts.config, os.path.join(output_directory, 'config.yaml')) # copy config file to output folder

    # Start training
    iterations = trainer.resume(checkpoint_directory, hyperparameters=config,need_opt=False) if opts.resume else 0
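    # max_iter: total optimizer steps, roughly epochs * batches per epoch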
    max_iter = int(config['n_ep'] * len(dataset) / config['batch_size']) + 1

    while True:
        for it, out_data in enumerate(train_loader):
            for j in range(len(out_data)):
                out_data[j] = out_data[j].cuda().detach()
            if config['models_name'] == 'dynamic_human':
                Xa_out, Xb_out, Yb_out, Xb_prev_out, Xb_next_out, Xa_mask, Yb_mask, rand_y_out, rand_y_mask = out_data
            trainer.update_learning_rate()
            with Timer("Elapsed time in update: %f"):
                # Main training code
                trainer.dynamic_gen_update(Xa_out, Xb_out, Yb_out, Xb_prev_out, Xb_next_out,
                                           Xa_mask, Yb_mask, rand_y_out, rand_y_mask, config)
                #torch.cuda.synchronize()
            # Dump training stats in log file
            if (iterations + 1) % config['log_iter'] == 0:
                print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
                write_loss(iterations, trainer, train_writer)

            if (iterations ) % config['image_display_iter'] == 0:
                write_image2display(iterations, trainer, train_writer)

            # Save network weights
            if (iterations+1 ) % config['snapshot_save_iter'] == 0:
                trainer.save(checkpoint_directory, iterations)
            iterations += 1
            if iterations >= max_iter:
                sys.exit('Finish training')
Example #11
    def get_models(self):
        self.md = []
        out = Outcome()
        for x in out.data:
            name = [c for c in x][0]
            #print(name)
            model = Models(self.datas['actual'], x[name])
            self.md.append({'name': name, 'model': model})
Example #12
    def __init__(self, db_url, db_schema, **kwargs):
        self.engine = create_engine(db_url)
        Base.metadata.bind = self.engine
        DBSession.configure(bind=self.engine)
        self.models = Models(Base, db_schema)
        self.query = Query(self.models, DBSession)
        self.persen_denda = 'persen_denda' in kwargs and \
            kwargs['persen_denda'] or 0
Example #13
def delete(num):
    db = Models()
    if not db.view():
        return render_template('success.html', not_delete=True)
    else:
        if request.method == 'GET':
            db.delete(str(num))
            return render_template('success.html', delete=True)
Example #14
def make_model(train_image, train_label, test_image, test_label, name):
    try:
        m = Models(shape=train_image.shape[1:], name=name)
        # exec() always returns None; look up the selected builder with getattr instead
        model = getattr(m, bp.USE_MODEL)(train_image, train_label, test_image, test_label)
        print('info: input_shape:{}'.format(train_image.shape[1:]))
    except Exception as e:
        print(e)
        exit()
Example #15
def main(args):
    pp = Preprocessing()

    # load data
    print("Loading Data.....\n\n")
    train_block, train_block_label = pp.read_train_file(
        args.train_data, args.train_label)
    test_block = pp.read_test_file(args.test_data)

    # explore data, do some visualization
    print("Exploring Data (see 'fig' folder for visualization) .....\n\n")
    viz = Visualization()

    # histogram for the lpc coefficient distribution
    viz.visualize_lpc_distribution(train_block)

    # histogram for the block length (or point of time) distribution
    viz.visualize_block_length_distribution(train_block)

    # plot one block of lpc coefficient for each speaker to look at the pattern of voice frequency
    viz.visualize_lpc_time_series(train_block)
    viz.visualize_fitted_lpc_series(train_block)

    max_length = 29
    final_block_size = 18

    print("Data Preprocessing (padding to fixed size blocks)....\n\n")
    # Take the best lengths (18), truncate the longer block, and pad the  shorter block by the last row
    train_data = pp.pad_to_fixed_size_blocks(train_block, max_length,
                                             final_block_size)
    test_data = pp.pad_to_fixed_size_blocks(test_block, max_length,
                                            final_block_size)

    # dummy test label for convenience
    test_block_label = [[i] for i in np.zeros(len(test_data))]

    print("Generating Features (for ML Algorithms)... \n\n")

    # Generate fixed length feature vector for traditional machine learning input
    final_train_data = pp.convert_to_vectors(train_data, train_block_label,
                                             final_block_size)
    final_test_data = pp.convert_to_vectors(test_data, test_block_label,
                                            final_block_size)

    # See scatter plot to find out if there is grouping based on feature vector
    viz.lpc_scatter_plot(final_train_data)

    # Looks like there is a grouping, so let's try to classify using some popular algorithm
    model = Models()
    model.run_classification_models(final_train_data, final_test_data)

    print("SVM Prediction Saved (see 'results/submission.txt' )... \n\n")

    #Also try LSTM for classification
    model.run_LSTM_model(np.array(train_data), np.array(train_block_label),
                         np.array(test_data))
    print("LSTM Prediction Saved (see 'results/submission_lstm.txt' )... \n\n")
Example #16
def main():
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        import sklearn

    # eda = Eda()
    # cleaned_data = Cleaning()
    # features = Features()
    models = Models()
Example #17
    def creating_dataframe(self, dictionary):
        final_words = []
        final_words1 = []
        documents = []
        l = []
        z = []
        docs = {}
        keys = dictionary.keys()
        for key in keys:
            kk = str(key)
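            # collect every 8-digit document id found in the key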
            k = re.findall(r'\d{8}', kk)
            l.append(k)
        for i in l:
            for j in i:
                z.append(j)
        for key in z:
            # if key == '19234329':
            print(
                "###################### Generating topic labels for {} ############################"
                .format(key))
            df = pd.DataFrame(dictionary[key])
            df.columns = ['Text']
            df_ = df['Text'].apply(lambda x: ''.join(x))
            df_ = df_.str.lower()
            df_ = df_.apply(self.tokenize)
            df_ = df_.apply(self.replace)
            df_ = df_.apply(self.split)
            df_ = df_.apply(self.terms_only)
            df_ = df_.apply(lambda x: ' '.join(x))
            df_ = df_.apply(lambda x: re.sub(r' +', ' ', x))
            for i in df_:
                final_words.append("".join(i).strip().split())
            final_words1.extend(i for i in final_words if len(i) >= 5)
            documents.extend(re.sub(r' +', ' ', ' '.join(i)) for i in final_words1)

            if key in docs:
                docs[key].append(documents)
            else:
                docs[key] = documents

            mm = Models(5, 10, **docs)
            terms_to_wiki = mm.calling_methods('LDA')
            ll = Labels(terms_to_wiki)
            wiki_titles = ll.get_titles_wiki()
            equal_length = ll.remove_all_null_dicts_returned_from_wiki(
                **wiki_titles)
            frq = ll.calculating_word_frequency(**equal_length)
            results = ll.predicting_label(**frq)

            print(key, results)
        print('########### FINAL FILE EXECUTED ##################')
Example #18
    def _init_model(self):

        M = Models()
        model = M.FPN(img_ch=3, output_ch=1)
        # model = U_Net(img_ch=3, output_ch=1)

        if torch.cuda.device_count() > 1 and self.args.mgpu:
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
            model = nn.DataParallel(model)

        self.model = model.to(self.device)
Example #19
    def start(self):
        bag_of_words, words = TermFrequency(self.trained).create_vocabulary()

        v = Vectorizer(self.trained, self.classify, words, bag_of_words)

        tfidf_trained = v.tfidf_for_tweets_trained
        evaluations = v.evaluations
        tfidf_to_classify = v.tfidf_for_tweets_to_classify

        models = Models(tfidf_trained, evaluations, tfidf_to_classify)
        prediction = models.svm_linear()

        return prediction
Example #20
    def load(self, token_lists, model_file):
        self.model_file = "./trained_model/random4/randomhole.tfl"
        self.prepare_data(token_lists)

        with tf.Graph().as_default():
            self.model = Models().create_network(self.in_max_int,
                                                 self.out_max_int,
                                                 model_name="bidirectional_attention_rnn",
                                                 in_seq_len=self.in_seq_len, out_seq_len=self.out_seq_len,
                                                 num_layers=2, memory_size=128,
                                                 embedding_size=128, num_heads=8, scope="randomhole")

            self.model.load(self.model_file)
Example #21
def search(query):
    assert isinstance(query, str)
    # Model
    db = get_db_instance()
    models = Models(db)
    model_caches = models.getCaches()
    # Yahoo shopping API
    appid = get_yahoo_appid()
    assert appid is not None
    # Run
    y = YahooShopping(appid=appid, cache=model_caches)
    content = y.query(query=query, ignoreCache=False)
    print(content)
Example #22
def train():
    embeddings = np.load('text_embedding.npy', allow_pickle=True)
    sentiments = np.load('sentiments.npy', allow_pickle=True)
    texts = np.load('texts.npy', allow_pickle=True)
    all_texts = np.load('text_cache.npy', allow_pickle=True)
    categorical_sentiments = to_categorical(sentiments, num_classes=5)
    tokenizer = Tokenizer(num_words=300000, oov_token=None)
    tokenizer.fit_on_texts(all_texts)
    X_train, X_test, Y_train, Y_test = train_test_split(texts,
                                                        categorical_sentiments,
                                                        test_size=0.2)
    np.save("text_train.npy", X_train)
    np.save("sentiment_train.npy", Y_train)
    models = Models()
    logdir = "logs/scalars/" + datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = TensorBoard(log_dir=logdir)
    filepath = "ensemble_bgru.h5"  # checkpoint path, reused by the ModelCheckpoint callback below
    models.buil_pre_model(embeddings)
    model = models.model
    if os.path.isfile(filepath):
        model = load_model(filepath)

    checkpoint = ModelCheckpoint(filepath,
                                 monitor='loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    callbacks_list = [checkpoint, tensorboard_callback]

    model.fit(pad_sequences(tokenizer.texts_to_sequences(X_train[:500000]),
                            maxlen=75),
              Y_train[:500000],
              batch_size=512,
              epochs=50,
              validation_data=(pad_sequences(tokenizer.texts_to_sequences(
                  X_test[:5000]),
                                             maxlen=75), Y_test[:5000]),
              callbacks=callbacks_list,
              shuffle=True)

    result = model.predict_on_batch(
        pad_sequences(tokenizer.texts_to_sequences([
            " What happened 2 ur vegan food options?! At least say on ur site so i know I won't be able 2 eat anything for next 6 hrs #fail",
            " I sleep hungry and It gets harder everyday",
            "everything is great, i have lost some weight",
            "awesome, really cool", "should I play cards",
            "I am full and inshape", "is it okay to be that hungry at night?"
        ]),
                      maxlen=75))
    print("result: ", np.argmax(result, axis=-1), "\n")
Example #23
    def load(self, token_lists, model_file):
        self.total = 0
        self.correct = 0
        self.incorrect = 0
        self.top2 = 0
        self.top3 = 0
        self.prefix_model_file = "./trained_model/prefix_final/prefix.tfl"
        self.suffix_model_file = "./trained_model/suffix_final/suffix.tfl"
        self.prepare_data(token_lists)

        with tf.Graph().as_default():
            self.prefix_model = Models().create_network(
                len(self.string_to_number),
                len(self.string_to_number),
                model_name="bidirectional_attention_rnn",
                in_seq_len=self.in_seq_len,
                out_seq_len=self.out_seq_len,
                num_layers=2,
                memory_size=64,
                embedding_size=128,
                num_heads=8,
                scope="prefix")

            self.prefix_model.load(self.prefix_model_file)

        with tf.Graph().as_default():
            self.suffix_model = Models().create_network(
                len(self.string_to_number),
                len(self.string_to_number),
                model_name="bidirectional_attention_rnn",
                in_seq_len=self.in_seq_len,
                out_seq_len=self.out_seq_len,
                num_layers=2,
                memory_size=64,
                embedding_size=128,
                num_heads=8,
                scope="suffix")
            self.suffix_model.load(self.suffix_model_file)
Example #24
    def __init__(self, logger=None):
        if logger is None:
            self.logger = logging.getLogger()
        else:
            self.logger = logger
        # Model
        db = get_db_instance()
        models = Models(db)
        model_caches = models.getCaches()
        # Proxy
        proxies = get_amazon_scraping_proxy()
        # Run
        self.amazon_scraper = AmazonScraper(cache=model_caches, proxies=proxies,
                                            logger=self.logger)
Example #25
def update(num):
    form = NoteUpdate(request.form)  # instantiating form to use the forms defined from the Form class in form.py
    if request.method == 'GET':
        return render_template('update.html', update=True, form=form, num=num)
    else:
        if not form.validate_on_submit():  # making sure that the form is validated before submission
            return render_template('update.html', update=True, not_validate=True, form=form, num=num)
        else:
            db = Models()
            name = request.form['note_name']
            subject = request.form['note_subject']
            content = request.form['note_content']
            db.update(str(num), name, subject, content)
            return render_template('success.html', update=True, form=form)
Example #26
    def _init_model(self):

        criterion = nn.BCELoss()
        self.criterion = criterion.to(self.device)
        M = Models()
        model = M.PSP(img_ch=3, output_ch=1)

        self.model = model.to(self.device)
        # init_weights(self.model, 'kaiming', gain=1)
        # summary(self.model, input_size=(4, 448, 448))
        self.model_optimizer = optim.Adamax(model.parameters(),
                                            lr=1e-3,
                                            weight_decay=0.01)
        self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
            self.model_optimizer, T_max=len(self.train_queue))
Example #27
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-i",
        "--eval-dir",
        required=True,
        help="List of problem instances, i.e., [filename].txt files.")
    parser.add_argument(
        "-o",
        "--output-dir",
        required=True,
        help="List of truth files, one for each txt file in our --eval-dir.")
    args = parser.parse_args()

    if os.path.exists(args.output_dir):
        logging.warn(
            '[%s] already exists! Will possibly override files in it!' %
            args.output_dir)
    else:
        logging.debug('Creating [%s].' % args.output_dir)
        os.makedirs(args.output_dir)

    texts = load_texts(args.eval_dir)
    scalers = load_scalers()
    models = Models()

    splitted_texts = split_texts(texts)

    # NOTE using standard scaler
    splitted_texts_vectors = to_vectors(splitted_texts,
                                        scalers['standard'].transform)

    logging.debug('Making predictions...')
    for i, chunks_vectors_tuple in enumerate(splitted_texts_vectors, 1):
        chunks_vectors_pairs = itertools.combinations(chunks_vectors_tuple, 2)
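        # accumulate classifier confidence over all chunk pairs: index 0 = no style change, index 1 = change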
        confidences = [0, 0]  # (for false, for true)
        for a, b in chunks_vectors_pairs:
            pred_for_false, pred_for_true = models.classify_proba(a, b)
            # TODO if very confident about style change - break or something?
            confidences[0] += pred_for_false
            confidences[1] += pred_for_true

        y_pred = bool(np.argmax(confidences))
        with open(os.path.join(args.output_dir, 'problem-%d.truth' % i),
                  'w') as f:
            json.dump({'changes': y_pred}, f)

    logging.debug('Done.')
Example #28
    def __init__(self):
        cudnn.benchmark = True
        # Load experiment setting
        config = get_config('configs/OT3.yaml')
        self.trainer = Models(config)
        self.trainer.cuda(config['cuda_device'])

        # Setup logger and output folders

        self.trainer.resume('outputs/OT3/checkpoints',
                            hyperparameters=config,
                            need_opt=False)
        self.trainer.eval()
        self.config = config
        self.dataset = My3DDataset(opts=self.config, is_Train=False)
        self.test_loader = DataLoader(dataset=self.dataset,
                                      batch_size=self.config['batch_size'] * 5,
                                      shuffle=False,
                                      num_workers=self.config['nThreads'])
Example #29
    def _init_dataset(self):

        M = Models()

        if self.args.mgpu:
            self.batch_size = 28
            print('batch_size: ', self.batch_size)
            self.date = '/2020-05-06~11:38:23'
            self.Mo = M.FPN(img_ch=3, output_ch=1)
        else:
            self.batch_size = 7
            print('batch_size: ', self.batch_size)
            self.date = '/2020-05-25~05:51:58'
            self.Mo = U_Net(img_ch=3, output_ch=1)

        test_images = Angioectasias(self.abnormality, mode='test')
        self.test_queue = DataLoader(test_images,
                                     batch_size=self.batch_size,
                                     drop_last=False)
Example #30
    def __init__(self, logger=None):
        if logger is None:
            self.logger = logging.getLogger()
        else:
            self.logger = logger
        # Model
        db = get_db_instance()
        models = Models(db)
        model_caches = models.getCaches()
        self.model_identifiers = models.getIdentifiers()
        # Yahoo
        yahoo_appid = get_yahoo_appid()
        self.yahoo_api = YahooShopping(appid=yahoo_appid,
                                       cache=model_caches,
                                       logger=self.logger)
        # Amazon
        proxies = get_amazon_scraping_proxy()
        self.amazon_api = AmazonScraper(cache=model_caches,
                                        proxies=proxies,
                                        logger=self.logger)