コード例 #1
0
def mlp_classifier(matrixes,pred_path, weights_path, type):
    """Train an MLP classifier, persist the fitted model and its test-set
    class probabilities, and append the confusion matrix.

    Relies on module-level splits x_train/y_train/x_test/y_test.

    :param matrixes: list collecting confusion matrices (mutated in place)
    :param pred_path: root directory for prediction pickles
    :param weights_path: root directory for fitted-model pickles
    :param type: experiment sub-directory name (shadows the builtin ``type``;
        kept to preserve the public signature)
    """
    # Train model
    mlp_classifier = MLPClassifier(hidden_layer_sizes=(650, ), activation='relu', solver='adam', alpha=0.0001,
                                batch_size='auto', learning_rate='adaptive', learning_rate_init=0.01,
                                power_t=0.5, max_iter=1000, shuffle=True, random_state=None, tol=0.0001,
                                verbose=False, warm_start=True, momentum=0.8, nesterovs_momentum=True,
                                early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
                                epsilon=1e-08)

    mlp_classifier.fit(x_train, y_train)

    # Save model -- use a context manager so the handle is closed
    # (the original passed open() straight to pickle.dump and leaked it).
    model_file = os.path.join(weights_path, type, "mlp_classifier.pkl")
    with open(model_file, 'wb') as f:
        pickle.dump(mlp_classifier, f)

    # Predict class probabilities; argmax recovers the label index used
    # for the confusion matrix.
    pred_probs = mlp_classifier.predict_proba(x_test)
    matrixes.append(confusion_matrix(y_test, np.argmax(pred_probs, axis=-1)))

    # Results
    model_name = "MLP classifier"
    display_results(model_name, y_test, pred_probs)

    # Persist raw probabilities for later ensembling.
    pred_file = os.path.join(pred_path, type, "mlp_classifier.pkl")
    with open(pred_file, 'wb') as f:
        pickle.dump(pred_probs, f)
コード例 #2
0
def fit_mlp(image_size=(28, 28),
            datasets='../data/mnist.pkl.gz',
            outpath='../output/mnist_lenet.params',
            n_hidden=500,
            learning_rate=0.01,
            L1_reg=0.00,
            L2_reg=0.001,
            n_epochs=1000,
            batch_size=20,
            patience=10000,
            patience_increase=2,
            improvement_threshold=0.995):
    """Build an MLP classifier and train it with early-stopping minibatch SGD.

    Returns the fitted SupervisedMSGD learner.
    """
    # Symbolic graph inputs: minibatch index, image matrix, label vector.
    minibatch_index = T.lscalar()
    inputs = T.matrix('x')
    labels = T.ivector('y')

    classifier = MLP(rng=rng.RandomState(SEED),
                     input=inputs,
                     n_in=reduce(np.multiply, image_size),
                     n_hidden=n_hidden,
                     n_out=10)
    # Negative log-likelihood plus L1/L2 weight penalties.
    regularized_cost = (
        classifier.negative_log_likelihood(labels)
        + L1_reg * classifier.L1
        + L2_reg * classifier.L2
    )
    learner = SupervisedMSGD(minibatch_index, inputs, labels, batch_size,
                             learning_rate, load_data(datasets), outpath,
                             classifier, regularized_cost)

    # Fit with early stopping, then report validation loss and timing.
    best_validation_loss, best_iter, epoch, elapsed_time = learner.fit(
        n_epochs=n_epochs,
        patience=patience,
        patience_increase=patience_increase,
        improvement_threshold=improvement_threshold)
    display_results(best_validation_loss, elapsed_time, epoch)

    return learner
コード例 #3
0
def fit_autoencoder(image_size=(28, 28),
                    n_image_channels=1,
                    datasets='../data/mnist.pkl.gz',
                    outpath='../output/mnist_autoencoder.params',
                    n_visible=784,
                    n_hidden=500,
                    learning_rate=0.01,
                    corruption_level=0.0,
                    n_epochs=1000,
                    batch_size=20,
                    patience=10000,
                    patience_increase=2,
                    improvement_threshold=0.995):
    """Train a (denoising) autoencoder with early-stopping minibatch SGD.

    Returns the fitted UnsupervisedMSGD learner.
    """
    # Symbolic inputs: minibatch index and the visible-layer matrix.
    minibatch_index = T.lscalar()
    visible = T.dmatrix(name='input')

    encoder = AutoEncoder(np_rng=rng.RandomState(SEED),
                          input=visible,
                          th_rng=None,
                          n_visible=n_visible,
                          n_hidden=n_hidden,
                          corruption_level=corruption_level)
    learner = UnsupervisedMSGD(minibatch_index, visible, batch_size,
                               learning_rate, load_data(datasets), outpath,
                               encoder, encoder.cost)

    # Fit with early stopping, then report validation error and timing.
    best_validation_error, best_iter, epoch, elapsed_time = learner.fit(
        n_epochs=n_epochs,
        patience=patience,
        patience_increase=patience_increase,
        improvement_threshold=improvement_threshold)
    display_results(best_validation_error, elapsed_time, epoch)

    return learner
コード例 #4
0
ファイル: traceworks.py プロジェクト: santoshs/traceworks
    def execute_query(self):
        """Run each requested query number against the trace database.

        Lazily loads the trace-mismatch state from trace_state_file once per
        process (cached via the module-level trace_state_set flag), prints a
        query's disclaimer when the trace is incomplete, substitutes
        positional query arguments, verifies the expected tables exist in
        the SQLite database, then executes the SQL and displays the rows.
        """
        global trace_mismatch_entry
        global trace_mismatch_exit
        global trace_state_set

        # Read the state of the trace data (only on the first call).
        if (trace_state_set == False):
            with open(trace_state_file) as f:
                for l in f:
                    # Lines look like "key: value"; the stored values keep
                    # their trailing newline and are only used for
                    # truthiness below.
                    entries = l.split(': ')
                    if (entries[0] == 'trace_mismatch_entry'):
                        trace_mismatch_entry = entries[1]
                    elif (entries[0] == 'trace_mismatch_exit'):
                        trace_mismatch_exit = entries[1]
                trace_state_set = True

        for q in self.args.query:
            # Query numbers are 1-based indices into self.queries.
            if q <= 0 or q > len(self.queries):
                print("Invalid query number {}".format(q))
                continue

            query = self.queries[q - 1]
            if 'query' not in query:
                print("Query not implemented")
                continue

            # print disclaimer if trace is incomplete
            if (trace_mismatch_entry or trace_mismatch_exit):
                if ("disclaimer" in query):
                    print(query["disclaimer"])

            # Substitute user-supplied positional arguments when the query
            # declares any; bail out if they are required but missing.
            if "args" in query and len(query['args']) > 0:
                if not self.args.qargs:
                    print("query '{}' requires {} argument(s)".format(
                        query["name"], len(query["args"])))
                    exit(1)
                qstr = query['query'].format(*self.args.qargs)
            else:
                qstr = query['query']

            # Make sure the database actually contains (some of) the tables
            # the configuration expects before running the query.
            tables = self.table_list_from_config()
            self.cursor.execute(
                "SELECT name FROM sqlite_master WHERE type='table'")

            r = self.cursor.fetchall()
            db_tables = []
            for t in r:
                db_tables.append(t[0])

            if not set(db_tables).intersection(set(tables)):
                print(
                    'Please generate the database from ftrace before querying')
                exit(1)

            # Execute the query and render the rows with their column names
            # (taken from the cursor description metadata).
            self.cursor.execute(qstr)
            r = self.cursor.fetchall()
            col_names = list(map(lambda x: x[0], self.cursor.description))
            display_results(col_names, r)

        return
def main(args):
    """Feature-based image retrieval: extract (or load cached) features for a
    directory of images, embed the query image, and display the most similar
    results ranked by cosine similarity.

    :param args: dict with keys 'model', 'data_path', 'query_path', 'number'
    """
    # Load model
    print("[INFO] Loading model {}".format(args['model']))
    model = load_model(args['model'])
    model.summary()

    # Load image from the directory
    images, file_paths = load_image_from_directory(args['data_path'])

    # Build the cache path once; the original built it two different ways.
    features_file = os.path.join('../extracted_features',
                                 args['model'] + '.npy')
    if not os.path.exists(features_file):
        if not os.path.exists('../extracted_features'):
            os.mkdir('../extracted_features')

        # Extract feature
        print("[INFO] Extracting feature from image using {}".format(
            args['model']))
        # NOTE(review): only images[0] is fed to predict -- presumably the
        # loader returns one batched array; confirm against the loader.
        feature_vectors = model.predict(images[0])

        # Store feature to disk (np.save appends the .npy suffix itself).
        file_features = os.path.join('../extracted_features',
                                     '{}'.format(args['model']))
        np.save(file_features, feature_vectors)

    # Load the extracted features back from disk.
    print("[INFO] Loading extracted features from disk ...")
    with open(features_file, 'rb') as f:
        feature_vectors = np.load(f)

    # Load query image. cv2.imread does not raise on a bad path -- it
    # returns None -- so the original try/except could never fire; check
    # the return value explicitly instead.
    qimage = cv2.imread(args['query_path'])
    if qimage is None:
        print("!!! Invalid path.")
        exit(0)

    # Extract feature of query image
    qimage = np.expand_dims(cv2.resize(qimage, IMAGE_SIZE, cv2.INTER_AREA),
                            axis=0)
    qvector = model.predict(qimage)[0]

    # Get the similarities between feature vectors and query
    print(
        "[INFO] Calculating the similarities between feature vectors and query vector ..."
    )
    similarities = cosin_similarity(feature_vectors, qvector)

    # Ranking base on similarities (most similar first)
    indices = np.argsort(similarities)[::-1]

    # Display results
    display_results(qimage[0], args['query_path'], file_paths, args['model'],
                    indices, args['number'])
コード例 #6
0
ファイル: model.py プロジェクト: zxy9815/Behavior_Cloning
def main(_):
    """Load training data, fit the behavior-cloning model, show history."""
    # Features and labels from the configured data directory.
    X_train, y_train = read_data(FLAGS.data_dir)
    print(X_train.shape, y_train.shape)

    # Build, train, and visualize.
    model = create_model()
    history_object = train(model, X_train, y_train)
    display_results(history_object)
コード例 #7
0
def fit_lenet(image_size=(28, 28), n_image_channels=1,
              datasets='../data/mnist.pkl.gz', outpath='../output/mnist_lenet.params',
              filter_shape=(5, 5), nkerns=(2, 6), pool_size=(2,2), n_hidden=500,
              learning_rate=0.01, L1_reg=0.00, L2_reg=0.001,
              n_epochs=1000, batch_size=20, patience=10000,
              patience_increase=2, improvement_threshold=0.995):
    """Train a LeNet convolutional classifier with early-stopping SGD.

    Returns the fitted SupervisedMSGD learner.
    """
    # Symbolic graph inputs: minibatch index, image matrix, label vector.
    minibatch_index = T.lscalar()
    images = T.matrix('x')
    targets = T.ivector('y')

    classifier = LeNet(rng=rng.RandomState(SEED),
                       input=images,
                       batch_size=batch_size,
                       n_image_channels=n_image_channels,
                       image_size=image_size,
                       nkerns=nkerns,
                       filter_shape=filter_shape,
                       pool_size=pool_size,
                       n_hidden=n_hidden)
    # Negative log-likelihood plus L1/L2 weight penalties.
    penalized_cost = (classifier.negative_log_likelihood(targets) +
                      L1_reg * classifier.L1 +
                      L2_reg * classifier.L2)
    learner = SupervisedMSGD(minibatch_index, images, targets, batch_size,
                             learning_rate, load_data(datasets), outpath,
                             classifier, penalized_cost)

    # Fit with early stopping, then report validation loss and timing.
    best_validation_loss, best_iter, epoch, elapsed_time = learner.fit(
        n_epochs=n_epochs,
        patience=patience,
        patience_increase=patience_increase,
        improvement_threshold=improvement_threshold)
    display_results(best_validation_loss, elapsed_time, epoch)

    return learner
コード例 #8
0
def main(_):
    """Train the behavior-cloning model on a train/validation split."""
    samples = read_data(FLAGS.data_dir)

    # Hold out 20% of the samples for validation.
    train_samples, validation_samples = train_test_split(samples, test_size=0.2)

    # Build, train, and visualize.
    model = create_model()
    history_object = train(model, train_samples, validation_samples)
    display_results(history_object)
コード例 #9
0
    def exact_date(self):
        """Search the work log for entries whose date matches exactly."""
        clear_screen()

        search_item = "date"

        while True:
            # Prompt for a date and load the current log entries.
            date_object, log_entries = Task.store_date(self), compile_log()

            provided_date = date_object.strftime("%Y-%m-%d")

            matched_dates = list(
                filter(lambda x: x['date'] == provided_date,
                       log_entries))  # filters tasks

            if not matched_dates:
                empty_results = no_results(search_item)

                if empty_results:
                    continue  # prompt for a new date input if 0 results are generated from the previous date
                break  # prompt the user for a new search; N - Main Menu; Y - Search Menu
            else:
                # Iterate over the entries that meet the user's criteria.
                # The original then tested `if not x or x:`, which is always
                # true regardless of display_results' return value, so the
                # loop unconditionally ends after displaying.
                display_results(matched_dates, search_item)
                break  # prompt the user for a new search; N - Main Menu; Y - Search Menu
コード例 #10
0
def download_osm(area_name):
    """Given an area name, download corresponding OSM elements."""
    # Resolve the area name to an OSM area id via Nominatim.
    area_id = Nominatim().query(area_name).areaId()

    # Build and run the Overpass query for all ways and nodes in the area.
    query = overpassQueryBuilder(area=area_id, elementType=['way', 'node'],
                                 out='body')
    osm_data = Overpass().query(query, timeout=600)

    # Display results
    utils.display_results(osm_data)

    # Return the raw way/node elements as JSON dicts.
    return osm_data.toJSON()['elements']
コード例 #11
0
def ensemble_classifier(matrixes, pred_path, type):
    """Average the saved per-model probability predictions for every model
    enabled in ensemble_config, record the confusion matrix, and persist the
    ensembled probabilities.

    :param matrixes: list collecting confusion matrices (mutated in place)
    :param pred_path: root directory holding per-model prediction pickles
    :param type: experiment sub-directory name (shadows the builtin ``type``;
        kept to preserve the public signature)
    :raises ValueError: if no model is enabled in ensemble_config
    """
    predictions = []
    models = []
    pred_file = os.path.join(pred_path, type, '')
    for model, flag in ensemble_config.items():
        if flag:
            with open(pred_file + '{}_classifier.pkl'.format(model), 'rb') as f:
                # `loaded` avoids shadowing the builtin name `file`.
                loaded = pickle.load(f)
                predictions.append(loaded)
                models.append(model)

    if not predictions:
        # Fail loudly instead of the obscure ZeroDivisionError below.
        raise ValueError(
            "No models enabled in ensemble_config; nothing to ensemble.")

    # Predict: simple average of the member probabilities.
    pred_probs = sum(predictions) / len(predictions)
    matrixes.append(confusion_matrix(y_test, np.argmax(pred_probs, axis=-1)))
    combination = '_'.join(models) + "_classifier.pkl"
    pred_file = os.path.join(pred_path, type, combination)
    with open(pred_file, 'wb') as f:
        pickle.dump(pred_probs, f)
    display_results("Ensemble", y_test, pred_probs)
コード例 #12
0
def mnb_classifier(matrixes,pred_path, weights_path, type):
    """Train a Multinomial Naive Bayes classifier, persist the model and its
    test-set probabilities, and append the confusion matrix.

    Relies on module-level splits x_train/y_train/x_test/y_test.

    :param matrixes: list collecting confusion matrices (mutated in place)
    :param pred_path: root directory for prediction pickles
    :param weights_path: root directory for fitted-model pickles
    :param type: experiment sub-directory name (shadows the builtin ``type``;
        kept to preserve the public signature)
    """
    # Train model
    mnb_classifier = MultinomialNB()
    mnb_classifier.fit(x_train, y_train)

    # Save model -- use a context manager so the handle is closed
    # (the original passed open() straight to pickle.dump and leaked it).
    model_file = os.path.join(weights_path, type, "mnb_classifier.pkl")
    with open(model_file, 'wb') as f:
        pickle.dump(mnb_classifier, f)

    # Predict
    pred_probs = mnb_classifier.predict_proba(x_test)
    matrixes.append(confusion_matrix(y_test, np.argmax(pred_probs, axis=-1)))

    # Results
    model_name = "MNB classifier"
    display_results(model_name, y_test, pred_probs)

    pred_file = os.path.join(pred_path, type, "mnb_classifier.pkl")
    with open(pred_file, 'wb') as f:
        pickle.dump(pred_probs, f)
コード例 #13
0
def lr_classifier(matrixes,pred_path, weights_path, type):
    """Train a multinomial logistic-regression classifier, persist the model
    and its test-set probabilities, and append the confusion matrix.

    Relies on module-level splits x_train/y_train/x_test/y_test.

    :param matrixes: list collecting confusion matrices (mutated in place)
    :param pred_path: root directory for prediction pickles
    :param weights_path: root directory for fitted-model pickles
    :param type: experiment sub-directory name (shadows the builtin ``type``;
        kept to preserve the public signature)
    """
    # Train model
    lr_classifier = LogisticRegression(solver='lbfgs', multi_class='multinomial', max_iter=1000)
    lr_classifier.fit(x_train, y_train)

    # Save model -- use a context manager so the handle is closed
    # (the original passed open() straight to pickle.dump and leaked it).
    model_file = os.path.join(weights_path, type, "lr_classifier.pkl")
    with open(model_file, 'wb') as f:
        pickle.dump(lr_classifier, f)

    # Predict
    pred_probs = lr_classifier.predict_proba(x_test)
    matrixes.append(confusion_matrix(y_test, np.argmax(pred_probs, axis=-1)))

    # Results
    model_name = "LR classifier"
    display_results(model_name, y_test, pred_probs)

    pred_file = os.path.join(pred_path, type, "lr_classifier.pkl")
    with open(pred_file, 'wb') as f:
        pickle.dump(pred_probs, f)
コード例 #14
0
def random_forest(matrixes,pred_path, weights_path, type):
    """Train a random-forest classifier, persist the model and its test-set
    probabilities, and append the confusion matrix.

    Relies on module-level splits x_train/y_train/x_test/y_test.

    :param matrixes: list collecting confusion matrices (mutated in place)
    :param pred_path: root directory for prediction pickles
    :param weights_path: root directory for fitted-model pickles
    :param type: experiment sub-directory name (shadows the builtin ``type``;
        kept to preserve the public signature)
    """
    # Train model
    rf_classifier = RandomForestClassifier(n_estimators=1200, min_samples_split=25)
    rf_classifier.fit(x_train, y_train)

    # Save model -- use a context manager so the handle is closed
    # (the original passed open() straight to pickle.dump and leaked it).
    model_file = os.path.join(weights_path, type, "rf_classifier.pkl")
    with open(model_file, 'wb') as f:
        pickle.dump(rf_classifier, f)

    # Predict
    pred_probs = rf_classifier.predict_proba(x_test)
    matrixes.append(confusion_matrix(y_test, np.argmax(pred_probs, axis=-1)))

    # Results
    model_name = "Random Forest"
    display_results(model_name, y_test, pred_probs)

    pred_file = os.path.join(pred_path, type, "rf_classifier.pkl")
    with open(pred_file, 'wb') as f:
        pickle.dump(pred_probs, f)
コード例 #15
0
def xgb_classifier(matrixes,pred_path, weights_path, type):
    """Train an XGBoost classifier, persist the model and its test-set
    probabilities, and append the confusion matrix.

    Relies on module-level splits x_train/y_train/x_test/y_test and the
    emotion_dict label mapping.

    :param matrixes: list collecting confusion matrices (mutated in place)
    :param pred_path: root directory for prediction pickles
    :param weights_path: root directory for fitted-model pickles
    :param type: experiment sub-directory name (shadows the builtin ``type``;
        kept to preserve the public signature)
    """
    # Train model. The XGBoost keyword is `subsample`; the original passed
    # `sub_sample`, which XGBClassifier silently ignores, so the intended
    # 0.8 row subsampling never took effect.
    xgb_classifier = xgb.XGBClassifier(max_depth=7, learning_rate=0.008, objective='multi:softprob',
                                   n_estimators=1200, subsample=0.8, num_class=len(emotion_dict),
                                   booster='gbtree', n_jobs=4)
    xgb_classifier.fit(x_train, y_train)

    # Save model -- use a context manager so the handle is closed
    # (the original passed open() straight to pickle.dump and leaked it).
    model_file = os.path.join(weights_path, type, "xgb_classifier.pkl")
    with open(model_file, 'wb') as f:
        pickle.dump(xgb_classifier, f)

    # Predict
    pred_probs = xgb_classifier.predict_proba(x_test)
    matrixes.append(confusion_matrix(y_test, np.argmax(pred_probs, axis=-1)))

    # Results
    model_name = "XGB classifier"
    display_results(model_name, y_test, pred_probs)

    pred_file = os.path.join(pred_path, type, "xgb_classifier.pkl")
    with open(pred_file, 'wb') as f:
        pickle.dump(pred_probs, f)
コード例 #16
0
    def search_time(self):
        """Search log entries whose logged duration equals a minute count."""
        clear_screen()

        search_item = "time"

        while True:

            log_details = compile_log()

            # Keep prompting until the user supplies an integer.
            while True:
                try:
                    minutes_criteria = int(
                        input(
                            "\nProvide an amount of time (in minutes) in order to search entries within the work log: "
                        ))
                except ValueError:
                    print(
                        "To search time entries, only integers are permitted (15, 30, etc.)"
                    )
                else:
                    break

            time_tasked = []

            for t_entry in log_details:
                time_logged = datetime.datetime.strptime(
                    t_entry['time'], "%H:%M").time()  # a time object

                # 60*hour + minute is correct for hour == 0 as well, so the
                # original's `hour >= 1` branch was redundant.
                total_task_minutes = (
                    60 * time_logged.hour) + time_logged.minute

                if minutes_criteria == total_task_minutes:
                    time_tasked.append(t_entry)

            if not time_tasked:
                empty_results = no_results(search_item)

                if empty_results:
                    continue  # prompt for a new time input if 0 results are generated from the previous time
                break  # prompt the user for a new search; N - Main Menu; Y - Search Menu
            else:
                # Display matches; the original then tested `if not x or x:`,
                # which is always true, so the loop unconditionally ends here.
                display_results(time_tasked, search_item)
                break  # prompt the user for a new search; N - Main Menu; Y - Search Menu
コード例 #17
0
def fit_logistic(image_size=(28, 28),
             datasets='../data/mnist.pkl.gz', outpath='../output/mnist_logistic_regression.params',
             learning_rate=0.13, n_epochs=1000, batch_size=600,
             patience=5000, patience_increase=2, improvement_threshold=0.995):
    """Train a softmax (logistic-regression) classifier with early-stopping
    minibatch SGD.

    Returns the fitted SupervisedMSGD learner.
    """
    # Symbolic graph inputs: minibatch index, pixel matrix, digit labels.
    batch_index = T.lscalar()
    pixels = T.matrix('x')
    digits = T.ivector('y')

    classifier = LogisticRegression(input=pixels,
                                    n_in=reduce(np.multiply, image_size),
                                    n_out=10)
    # Plain negative log-likelihood; no regularization here.
    nll_cost = classifier.negative_log_likelihood(digits)
    learner = SupervisedMSGD(batch_index, pixels, digits, batch_size,
                             learning_rate, load_data(datasets), outpath,
                             classifier, nll_cost)

    # Fit with early stopping, then report validation loss and timing.
    best_validation_loss, best_iter, epoch, elapsed_time = learner.fit(
        n_epochs=n_epochs,
        patience=patience,
        patience_increase=patience_increase,
        improvement_threshold=improvement_threshold)
    display_results(best_validation_loss, elapsed_time, epoch)

    return learner
コード例 #18
0
ファイル: autoencoders.py プロジェクト: 121onto/theano-demos
def fit_autoencoder(image_size=(28, 28), n_image_channels=1,
            datasets='../data/mnist.pkl.gz', outpath='../output/mnist_autoencoder.params',
            n_visible=784, n_hidden=500,
            learning_rate=0.01, corruption_level=0.0,
            n_epochs=1000, batch_size=20, patience=10000,
            patience_increase=2, improvement_threshold=0.995):
    """Train a (denoising) autoencoder with early-stopping minibatch SGD.

    Returns the fitted UnsupervisedMSGD learner.
    """
    # Symbolic inputs: minibatch index and the visible-layer matrix.
    batch_index = T.lscalar()
    visible_input = T.dmatrix(name='input')

    encoder = AutoEncoder(np_rng=rng.RandomState(SEED),
                          input=visible_input,
                          th_rng=None,
                          n_visible=n_visible,
                          n_hidden=n_hidden,
                          corruption_level=corruption_level)
    learner = UnsupervisedMSGD(batch_index,
                               visible_input,
                               batch_size,
                               learning_rate,
                               load_data(datasets),
                               outpath,
                               encoder,
                               encoder.cost)

    # Fit with early stopping, then report validation error and timing.
    best_validation_error, best_iter, epoch, elapsed_time = learner.fit(
        n_epochs=n_epochs,
        patience=patience,
        patience_increase=patience_increase,
        improvement_threshold=improvement_threshold)
    display_results(best_validation_error, elapsed_time, epoch)

    return learner
 def IDS_approx(self, T, N, display_results=False):
     """
     Implementation of Information Directed Sampling with approximation of
     integrals using a grid on [0, 1], for Bernoulli Bandit Problems with
     Beta priors.
     :param T: int, time horizon
     :param N: int, number of points to take in the [0,1] interval
     :param display_results: boolean, if True intermediate IDS quantities
         are displayed at each step. Default False
     :return: np.arrays, reward obtained by the policy and sequence of chosen arms
     """
     # Per-arm cumulative rewards/counts plus per-step reward and arm logs.
     Sa, Na, reward, arm_sequence = self.init_lists(T)
     beta1, beta2 = self.init_prior()
     # Grid X and cached quantities used by the integral approximations.
     X, f, F, G, B, maap, p_star, prod_F1, g = self.init_approx(
         N, beta1, beta2)
     for t in range(T):
         if not self.flag:
             if np.max(p_star) > self.threshold:
                 # stop learning policy: one arm is optimal with high
                 # enough (approximate) posterior probability -- commit.
                 self.flag = True
                 self.optimal_arm = np.argmax(p_star)
                 arm = self.optimal_arm
             else:
                 # Approximate the information-ratio quantities and choose
                 # the IDS action for this step.
                 delta, g, p_star, maap = self.IR_approx(
                     N, beta1, beta2, X, f, F, G, g)
                 arm = self.IDSAction(delta, g)
         else:
             arm = self.optimal_arm
         self.update_lists(t, arm, Sa, Na, reward, arm_sequence)
         prev_beta = np.array([copy(beta1[arm]), copy(beta2[arm])])
         # Posterior update (Bernoulli likelihood with Beta conjugate prior)
         beta1[arm], beta2[
             arm] = beta1[arm] + reward[t], beta2[arm] + 1 - reward[t]
         if display_results:
             # NOTE(review): `delta` is only assigned in the IR_approx
             # branch; if the policy commits before ever taking that branch
             # this raises NameError -- confirm intended usage.
             utils.display_results(delta, g, delta**2 / g, p_star)
         f, F, G, B = self.update_approx(arm, reward[t], prev_beta, X, f, F,
                                         G, B)
     return reward, arm_sequence
コード例 #20
0
    def search_pattern(self):
        """Search log entries whose task or details match a regex pattern."""
        clear_screen()

        search_type = 'regular expression'

        while True:

            search_item = "regular expression"

            log_entries = compile_log()

            pattern = input(
                r"Provide a regular expression pattern to match up entries in the work log: "
            )

            while not pattern:
                pattern = input(
                    r"Sorry. The pattern provided is not accepted: ")

            re_pattern = re.compile(pattern)

            # Keep entries whose task or details match at their start
            # (re.match anchors at the beginning of the string).
            pattern_matches = []

            for single in log_entries:
                if re_pattern.match(single['task']) or re_pattern.match(
                        single['details']):
                    pattern_matches.append(single)

            if not pattern_matches:
                empty_results = no_results(search_item)

                if empty_results:
                    continue  # prompt for a new string input if 0 results are generated from the previous string
                break  # prompt the user for a new search; N - Main Menu; Y - Search Menu

            else:
                # Display matches; the original then tested `if not x or x:`,
                # which is always true, so the loop unconditionally ends here.
                display_results(pattern_matches, search_item)
                break  # prompt the user for a new search; N - Main Menu; Y - Search Menu
コード例 #21
0
    def search_string(self):
        """Search log entries whose task or details contain a phrase."""
        clear_screen()

        search_item = "phrase"

        while True:

            string_criteria = input(
                f"Provide a {search_item} in order to search entries within the work log?\n\n>>> "
            ).title()

            while not string_criteria:
                string_criteria = input(
                    "The worklog cannot be searched with empty criteria.\nPlease search under a different phrase:\n\n>>>"
                )

            log_details = compile_log()

            # Substring match against both the task and its details.
            strings_matched = list(
                filter(
                    lambda x: string_criteria in x['task'] or string_criteria
                    in x['details'], log_details))

            if not strings_matched:
                empty_results = no_results(search_item)

                if empty_results:
                    continue  # prompt for a new string input if 0 results are generated from the previous string
                break  # prompt the user for a new search; N - Main Menu; Y - Search Menu

            else:
                # Display matches; the original then tested `if not x or x:`,
                # which is always true, so the loop unconditionally ends here.
                display_results(strings_matched, search_item)
                break  # prompt the user for a new search; N - Main Menu; Y - Search Menu
コード例 #22
0
                              gt_path,
                              shuffle=False,
                              gt_downsample=True,
                              pre_load=True)

# Evaluate the network over every sample, accumulating count-error metrics.
for blob in data_loader:
    im_data = blob['data']
    gt_data = blob['gt_density']
    # Forward pass: estimated density map for the input image.
    density_map = net(im_data, gt_data)
    density_map = density_map.data.cpu().numpy()
    # Ground-truth vs estimated crowd counts (sums over the density maps).
    gt_count = np.sum(gt_data)
    et_count = np.sum(density_map)
    mae += abs(gt_count - et_count)
    mape += abs(gt_count - et_count) / gt_count
    mse += ((gt_count - et_count) * (gt_count - et_count))
    total_count += gt_count
    if vis:
        utils.display_results(im_data, gt_data, density_map)
    if save_output:
        utils.save_density_map(
            density_map, output_dir,
            'output_' + blob['fname'].split('.')[0] + '.png')
# NOTE(review): at this point `mae` is still the *summed* absolute error
# (it is averaged on the next line), so `ape` = total abs error / total
# ground-truth count -- presumably an aggregate percentage error; confirm.
ape = mae / total_count
mae = mae / data_loader.get_num_samples()
mape = mape / data_loader.get_num_samples()
mse = np.sqrt(mse / data_loader.get_num_samples())
print('\nMAE: ' + str(mae) + ' MSE: ' + str(mse))
print('\nMAPE: ' + str(mape) + ' APE: ' + str(ape))
# Persist a summary of the headline metrics.
f = open(file_results, 'w')
f.write('MAE: %0.2f, MSE: %0.2f' % (mae, mse))
f.close()
コード例 #23
0
from utils import read_imagenet_classnames, display_results, run_inference, parse_base64
from torchvision import models

# CLI: data path, batch size, number of predictions to show, export flag.
parser = argparse.ArgumentParser(description='Inference Trained Model')
parser.add_argument('--data', metavar='DIR', default='./data', help='default data path')
parser.add_argument('-bs', '--batch-size', metavar='BS', default=2, help='maximum batchsize')
parser.add_argument('-tp', '--top-predictions', metavar='NUMPRED',\
                     default=5, help='number of top predictions per sample')
parser.add_argument('-exp', '--export', action="store_true",help='export model to onnx')


def export_model(model):
    """Trace the model with a dummy input and save it as TorchScript."""
    model_path = 'checkpoints/model.pt'
    # Dummy batch: one 3-channel 256x256 image.
    sample_input = torch.randn((1, 3, 256, 256))
    traced = torch.jit.trace(model.cpu().eval(), sample_input)
    torch.jit.save(traced, model_path)


if __name__ == "__main__":
    args = parser.parse_args()
    # Pretrained ImageNet ResNet-18 backbone.
    model = models.resnet18(pretrained=True)
    if args.export:
        export_model(model)
    else:
        imagenet_classes = read_imagenet_classnames("cache/imagenet_classnames.txt")
        # NOTE(review): preprocess_data is not among the names imported from
        # utils above (parse_base64 is imported but unused) -- confirm where
        # preprocess_data is defined, otherwise this branch raises NameError.
        data = preprocess_data("cache")
        predictions = run_inference(model, data[0], args.top_predictions)
        display_results(data, predictions, imagenet_classes)
コード例 #24
0
ファイル: backtest.py プロジェクト: jean1591/stock_pkg
                # Update wallet, ledger & portfolio
                wallet, ledger, portfolio = update_db(wallet, ledger,
                                                      portfolio, d,
                                                      cryptos[key],
                                                      c_d_df["order_type"],
                                                      close)

        except KeyError as e:
            print(f"KeyError -- {cryptos[key]} -- {e}")

# ROI: consolidate split crypto holdings, then value the portfolio and
# compute the return on the initial 5000 stake.
portfolio = undivide_crypto(["BTC-USD", "ETH-USD"], portfolio, 100)
portfolio_value, overall_value, roi = calculate_metrics(
    portfolio, df_list, cryptos, wallet, 5000)

display_results(cryptos, 5000, wallet, portfolio, portfolio_value,
                overall_value, roi)
generate_log(ledger)

# Tabulate the trade ledger for per-crypto signal extraction.
ledger_df = pd.DataFrame(ledger)
ledger_df.columns = [
    "date", "symbol", "order_type", "close", "qty", "order_value"
]

# Split each crypto's ledger rows into separate buy and sell frames.
signals_df = {}
for crypto in cryptos:
    signals_df[crypto] = {}
    buy_df = ledger_df[(ledger_df["symbol"] == crypto)
                       & (ledger_df["order_type"] == "buy")].copy()
    sell_df = ledger_df[(ledger_df["symbol"] == crypto)
                        & (ledger_df["order_type"] == "sell")].copy()
コード例 #25
0
                torch.nn.utils.clip_grad_norm_(feature_descriptor_model.parameters(), 10.0)
                optimizer.step()

                if batch == 0:
                    mean_rr_loss = np.mean(rr_loss.item())
                else:
                    mean_rr_loss = (mean_rr_loss * batch + rr_loss.item()) / (batch + 1.0)

            # Result display
            if batch % display_interval == 0:
                with torch.no_grad():
                    gt_heatmaps_1 = gt_heatmaps_1.cuda()
                    gt_heatmaps_2 = gt_heatmaps_2.cuda()
                    display_success = utils.display_results(colors_1, colors_2, feature_maps_1, feature_maps_2,
                                                            boundaries, response_map_1,
                                                            gt_heatmaps_1, response_map_2, gt_heatmaps_2,
                                                            sift, cross_check_distance, step,
                                                            writer, phase="Train")

            step += 1
            tq.update(batch_size)
            tq.set_postfix(loss='average: {:.5f}, current: {:.5f}'.format(mean_rr_loss, rr_loss.item())
                           )
            writer.add_scalars('Train', {'loss': mean_rr_loss}, step)

        tq.close()

        if cur_epoch % validation_interval != 0:
            continue

        # Validation
コード例 #26
0
def fit_stacked_autoencoder(image_size=(28, 28), n_out=10,
            datasets='../data/mnist.pkl.gz', outpath='../output/mnist_autoencoder.params',
            hidden_layer_sizes=[500, 500, 500], corruption_levels=[0.1, 0.2, 0.3],
            learning_rate_encoder=0.001, learning_rate_full=0.1,
            n_epochs_encoder=15, n_epochs_full=1000,
            batch_size_encoder=1, batch_size_full=20,
            patience_encoder=5000, patience_full=5000,
            patience_increase=2, improvement_threshold=0.995):
    """Train a stacked denoising autoencoder: greedy layer-wise
    pre-training of each encoder, then supervised fine-tuning of the
    whole stack.

    Parameters
    ----------
    image_size : tuple of int
        Input image dimensions; flattened size is their product.
    n_out : int
        Number of output classes for the supervised head.
    datasets : str
        Path to the pickled dataset passed to ``load_data``.
    outpath : str
        Where the fine-tuned parameters are written.
    hidden_layer_sizes, corruption_levels : list
        Per-layer encoder widths and denoising corruption rates.
        NOTE(review): list defaults are shared across calls; safe only
        if StackedAutoEncoder never mutates them -- confirm.
    learning_rate_encoder / learning_rate_full : float
        Learning rates for pre-training and fine-tuning respectively.
    n_epochs_*, batch_size_*, patience_*, patience_increase,
    improvement_threshold
        Early-stopping hyper-parameters for the two training phases.
    """
    n_inputs = reduce(np.multiply, image_size)
    index = T.lscalar(name='input')
    x = T.matrix(name='x')
    y = T.ivector(name='y')
    # `datasets` is rebound from path to the loaded (train/valid/test) data.
    datasets = load_data(datasets)

    stacked_encoder = StackedAutoEncoder(
        x=x,
        y=y,
        np_rng=rng,
        th_rng=None,
        n_inputs=n_inputs,
        hidden_layer_sizes=hidden_layer_sizes,
        corruption_levels=corruption_levels,
        n_out=n_out
    )
    cost = stacked_encoder.cost(y)

    # Greedy unsupervised pre-training, one encoder layer at a time.
    for i, encoder in enumerate(stacked_encoder.encoder_layers):
        print("Pre-training encoder layer %i" % i)
        learner = UnsupervisedMSGD(
            index,
            x,
            batch_size_encoder,
            learning_rate_encoder,
            datasets,
            None,  # pre-training weights are not persisted to disk
            encoder,
            encoder.cost
        )
        best_validation_error, best_iter, epoch, elapsed_time = learner.fit(
            n_epochs=n_epochs_encoder,
            patience=patience_encoder,
            patience_increase=patience_increase,
            improvement_threshold=improvement_threshold
        )
        # Fixed typo in the original message ("resuts").
        print("results for pre-training encoder %i" % i)
        display_results(best_validation_error, elapsed_time, epoch)

    # Supervised fine-tuning of the full stack; parameters go to `outpath`.
    print("Fitting full model")
    learner = SupervisedMSGD(
        index,
        x,
        y,
        batch_size_full,
        learning_rate_full,
        datasets,
        outpath,
        stacked_encoder,
        cost
    )
    best_validation_error, best_iter, epoch, elapsed_time = learner.fit(
        n_epochs=n_epochs_full,
        patience=patience_full,
        patience_increase=patience_increase,
        improvement_threshold=improvement_threshold
    )
    display_results(best_validation_error, elapsed_time, epoch)
Code example #27
0
        model.load_weights(MODEL_WEIGHTS)

        test_gen = ImageDataGenerator(rescale=1. / 255.0)
        test_gen = test_gen.flow_from_directory(directory=TEST_DIR,
                                                target_size=(64, 64),
                                                batch_size=1,
                                                class_mode=None,
                                                color_mode='rgb',
                                                shuffle=False,
                                                seed=69)

        class_indices = train_gen.class_indices
        class_indices = dict((v, k) for k, v in class_indices.items())
        test_gen.reset()

        predictions = model.predict_generator(test_gen,
                                              steps=len(test_gen.filenames))
        predicted_classes = np.argmax(np.rint(predictions), axis=1)
        true_classes = test_gen.classes

        prf, conf_mat = display_results(true_classes, predicted_classes,
                                        class_indices.values())

        print(prf)

        plot_predictions(true_classes, predictions, test_gen, class_indices)

    print("saving model..")
    model.save(SAVE_PATH)
Code example #28
0
    def search_dates(self):
        """Interactively search log entries around a user-supplied date.

        Asks for an origin date (via Task.store_date) and a day margin,
        then shows every log entry whose 'date' field falls inside
        [origin - margin, origin + margin].  Repeats when the user asks
        to search again after an empty result; otherwise returns to the
        caller's menu.
        """
        clear_screen()

        # (The original dead store `search_item = 'date'` was removed:
        # it was always overwritten below before any use.)
        while True:

            # Origin date from the user and a fresh snapshot of the log.
            date_origin, log_entries = Task.store_date(self), compile_log()

            # Re-prompt until the margin parses as an integer.
            while True:
                try:
                    date_margin = abs(
                        int(
                            input(
                                "\nHow many days before and after your date do you want to look: "
                            )))
                except ValueError:
                    print(
                        "The value provided can't establish the desired date range."
                    )
                else:
                    break

            clear_screen()

            search_item = "time"

            # Clamp the window to the representable datetime.date range.
            try:
                start_date = date_origin - datetime.timedelta(days=date_margin)
            except OverflowError:
                start_date = datetime.date(year=1, month=1, day=1)

            try:
                end_date = date_origin + datetime.timedelta(days=date_margin)
            except OverflowError:
                end_date = datetime.date(year=9999, month=12, day=31)

            # Entries whose date falls inside the [start, end] window.
            dates_wanted = [
                date_log for date_log in log_entries
                if start_date <= datetime.datetime.strptime(
                    date_log['date'], '%Y-%m-%d').date() <= end_date
            ]

            if not dates_wanted:
                # No hits: no_results() asks whether to try another date.
                if no_results(search_item):
                    continue  # prompt for a new date and margin
                break  # back to the caller's menu

            display_results(dates_wanted, search_item)
            # The original `if not date_range or date_range: break` was a
            # tautology, so the break is unconditional.
            break
def main(args):
    """Retrieve and rank the images most similar to a query image.

    Loads a feature-extraction model, computes (or reloads cached)
    feature vectors for the image directory, ranks them against the
    query image's feature vector, displays the top matches, and reports
    the average precision against the ground-truth file.

    Args:
        args: dict with keys 'model', 'data_path', 'query_path',
            'measure' (a key of MEASURE) and 'number' (top-k to keep).
    """
    # Load model
    print("[INFO] Loading model {}".format(args['model']))
    model = load_model(args['model'])
    model.summary()

    # Load image from the directory
    images, file_paths = load_image_from_directory(args['data_path'])

    feature_file = os.path.join('../extracted_features',
                                args['model'] + '.npy')
    if not os.path.exists(feature_file):
        if not os.path.exists('../extracted_features'):
            os.mkdir('../extracted_features')

        # Extract features once and cache them on disk.
        print("[INFO] Extracting feature from image using {}".format(
            args['model']))
        feature_vectors = model.predict(images[0])

        # np.save appends the '.npy' suffix to this stem itself.
        file_features = os.path.join('../extracted_features',
                                     '{}'.format(args['model']))
        np.save(file_features, feature_vectors)

    # Always reload from the cache so both code paths share one source.
    print("[INFO] Loading extracted features from disk ...")
    with open(feature_file, 'rb') as f:
        feature_vectors = np.load(f)

    # Load query image.  cv2.imread does not raise on a bad path -- it
    # returns None -- so the original bare try/except could never fire;
    # check the return value explicitly instead.
    qimage = cv2.imread(args['query_path'])
    if qimage is None:
        print("!!! Invalid path.")
        exit(0)

    # Extract feature of query image
    qimage = np.expand_dims(cv2.resize(qimage, IMAGE_SIZE, cv2.INTER_AREA),
                            axis=0)
    qvector = model.predict(qimage)[0]

    # Get the similarities between feature vectors and query
    print(
        "[INFO] Calculating the similarities between feature vectors and query vector using {} measure..."
        .format(args['measure']))
    similarities = MEASURE[args['measure']](feature_vectors, qvector)

    # Rank indices by descending similarity.
    indices = np.argsort(similarities)[::-1]

    # Display results
    display_results(qimage[0], args['query_path'], file_paths, args['model'],
                    indices, args['number'])

    # Load ground truth: one relevant file name per line, in a .txt file
    # named after the query image's base name.
    ground_truth_path = os.path.join(args['data_path'], 'ground_truth')
    ground_truth_file = os.path.join(
        ground_truth_path,
        os.path.split(args['query_path'])[1].split(".")[0] + '.txt')

    with open(ground_truth_file, 'r') as f:
        y_true = [line.rstrip() for line in f]

    # Base names of the top-k ranked predictions (slicing keeps the
    # original min(k, len(indices)) behaviour).
    y_predict = [
        os.path.split(file_paths[idx])[1] for idx in indices[:args['number']]
    ]

    # 1 = relevant, 0 = irrelevant, per ranked prediction.
    compared_result = [1 if name in y_true else 0 for name in y_predict]

    # Average precision over the full relevant set.
    ap = calculate_AP(compared_result, total=len(y_true))

    print("The average precision is {}".format(ap))
Code example #30
0
# Prepare the dataset (dset/data/model/retrain are defined earlier in
# the file, outside this chunk).
dset.getDataset()

# Estimator
dcgh = DeepCGH(data, model)

if retrain:
    dcgh.train(dset)

#%%
# Serve holograms for frames dropped into frame_path; a frame whose
# pixels sum to zero is the sentinel that stops the loop.
while (True):
    files = glob(frame_path)
    if len(files) > 0:
        data = scio.loadmat(files[0])['data']
        if data.sum() == 0:
            break
        if coordinates:
            data = dset.coord2image(data)
    # NOTE(review): this line runs every iteration, even when no new
    # file was found -- it then re-processes the previous `data`;
    # confirm that busy re-inference is intended.
    phase = np.squeeze(dcgh.get_hologram(data))

    # you can add your code here

#%% This is a sample test. You can generate a random image and get the results
propagate = get_propagate(data, model)
image = dset.get_randSample()[np.newaxis, ...]
# making inference is as simple as calling the get_hologram method
phase = dcgh.get_hologram(image)
# NOTE(review): `propagate` is rebuilt here with the same arguments as
# above -- the second call looks redundant; confirm.
propagate = get_propagate(data, model)
reconstruction = propagate(phase)
display_results(image, phase, reconstruction, 1)