Example #1
def run_skeleton(processor_function, seg_type):
    if os.path.exists(TMP_DIR):
        shutil.rmtree(TMP_DIR)

    if os.path.exists(OUT_DIR):
        shutil.rmtree(OUT_DIR)
    os.mkdir(TMP_DIR)
    os.mkdir(OUT_DIR)
    keys = request.get_json()
    if ORIGINAL_KEY not in keys:
        # Without the input zip there is nothing to segment.
        return json.dumps({'error': 'missing key: %s' % ORIGINAL_KEY})

    os.mkdir(os.path.join(TMP_DIR, '1000000'))
    os.mkdir(os.path.join(TMP_DIR, '1000000', ORIGINAL_KEY))

    _zip = keys[ORIGINAL_KEY]

    Base64ToFile(TMP_ZIP, _zip)

    with zipfile.ZipFile(TMP_ZIP, 'r') as zip_ref:
        zip_ref.extractall(os.path.join(TMP_DIR, '1000000', ORIGINAL_KEY))

    P = os.path.join(TMP_DIR, '1000000', ORIGINAL_KEY)

    cvt_dcm_png(P, seg_type)

    if seg_type == "CT_LIVER":
        command = "python3 main.py --mode=infer --infer_data=%s --output_folder=%s --ckpt=/mnt/SSD_BIG2/LSS_LIV/DenseNetCkpt_deeplab_third/-0 --layers_per_block=5,6,6,8,7,9 --batch_size=1 --organ=liver"

        process = subprocess.Popen((command % (P, OUT_DIR)).split(" "))
        process.wait()
    elif seg_type == "CT_SPLEEN":
        command = "python3 main.py --mode=infer --infer_data=%s --output_folder=%s --ckpt=/mnt/SSD_BIG2/LSS_LIV/DenseNetCkpt_deeplab_spleen_first/-1 --layers_per_block=5,6,6,8,7,9 --batch_size=1 --organ=spleen"

        process = subprocess.Popen((command % (P, OUT_DIR)).split(" "))
        process.wait()
    elif seg_type == "MR_LIVER":
        print("MR_LIVER execute")
        predict.run("patient", "out", "MRI", (512, 512))

    fs = os.listdir(OUT_DIR)
    fs.sort()
    for idx, f in enumerate(fs):
        write_dicom(imread(os.path.join(OUT_DIR, f)), idx,
                    os.path.join(OUT_DIR, f)[:-4] + ".dcm")
        os.remove(os.path.join(OUT_DIR, f))

    with zipfile.ZipFile(OUT_ZIP, 'w', zipfile.ZIP_DEFLATED) as zip_out:
        for f in os.listdir(OUT_DIR):
            zip_out.write(os.path.join(OUT_DIR, f), f)

    encoded = FileToBase64(OUT_ZIP)
    print('encoded length : %s' % (len(encoded)))
    return json.dumps({ORIGINAL_KEY: encoded})
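A hedged client-side sketch for the handler above, assuming it is exposed at a /segment route and that ORIGINAL_KEY names the JSON field (both the URL and the key value are placeholders, not confirmed by the snippet):

import base64
import requests

# Hypothetical endpoint URL and key name; adjust to the actual deployment.
URL = "http://localhost:5000/segment"
ORIGINAL_KEY = "original"

with open("series.zip", "rb") as f:
    payload = {ORIGINAL_KEY: base64.b64encode(f.read()).decode("ascii")}

resp = requests.post(URL, json=payload)
# The handler returns the segmented series as another base64-encoded zip.
result_zip = base64.b64decode(resp.json()[ORIGINAL_KEY])
with open("segmented.zip", "wb") as f:
    f.write(result_zip)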
Example #2
def postimage(request):

    if request.method == 'POST':
        data = {'description': None}
        form = InventoryForm(request.POST, request.FILES)
        if form.is_valid():
            try:
                data = predict.run(request.FILES['image'])
            except Exception:
                # Keep the placeholder prediction if inference fails.
                pass
            instance = Inventory(image=request.FILES['image'],
                                 prediction=data['description'])
            instance.save()

        return HttpResponse(json.dumps(data), content_type='application/json')
    else:
        return HttpResponse(json.dumps(
            {"nothing to see": "this isn't happening"}),
                            content_type="application/json")
Example #3
def main(_):
    # # train
    # train.run(False)
    #
    # # eval
    # train.run(True)

    # predict
    test_context = "but while the new york stock exchange did n't fall apart friday as the dow jones industrial average plunged N points most of" \
                   " it in the final hour it barely managed to stay this side of chaos some circuit breakers installed after the october N crash " \
                   "failed their first test traders say unable to cool the selling panic in both stocks and futures the N stock specialist firms on " \
                   "the big board floor the buyers and sellers of last resort who were criticized after the N crash once again could n't handle the " \
                   "selling pressure big investment banks refused to step up to the plate to support the beleaguered floor traders by buying big blocks" \
                   " of stock traders say heavy selling of standard & poor 's 500-stock index futures in chicago <unk> beat stocks downward "
    test_question = "how was the test on october?"
    predict.run(test_context, test_question)
Example #4
def main(argv):
    try:
        opts, _ = getopt.getopt(argv, "st:p:")
        if len(opts) != 1:
            raise getopt.GetoptError('Bad argument')

        opt, _ = opts[0]
        if opt == '-s':
            scrapper_script.run(logging)
        elif opt == '-t':
            if len(argv) != 2:
                raise getopt.GetoptError('Bad argument')

            train_script.run(logging, argv[1])
        elif opt == '-p':
            if len(argv) != 3:
                raise getopt.GetoptError('Bad argument')

            entities = predict_script.run(logging, argv[1], argv[2])
            for e in entities:
                print(e)
        else:
            raise getopt.GetoptError('Bad argument')
    except getopt.GetoptError:
        # Don't print `opts` here: it is unbound if getopt itself raised.
        print(
            'Usage: main.py [-s] [-t "model name"] [-p "model name" "Text here"]'
        )
        exit(2)
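Hedged invocation examples matching the usage string above (the model name and text are placeholders):

python main.py -s
python main.py -t "my-model"
python main.py -p "my-model" "Some text to extract entities from"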
Example #5
def main():
    model = ""
    opts, args = getopt.getopt(sys.argv[1:], "ht:p:m:")
    for op, value in opts:
        if op == "-h":
            usage()
            sys.exit()
        elif op == "-t":
            input_file = value
        elif op == "-p":
            input_file = value
        elif op == "-m":
            model = value

    if input_file == "":
        # Neither -t nor -p was given; avoid a NameError below.
        usage()
        sys.exit(2)

    input_wav = ffmpeg.convert(input_file)
    if model == "":
        input_wav = "20160203c.wav"
        songs_20160203c = [[0, 265], [1028, 1245], [1440, 1696], [2177, 2693]]
        song_dump(songs_20160203c)
        model = train.run(input_wav, songs_20160203c)
    Y, delimit_points = predict.run(input_wav, model)
    for i in delimit_points:
        print str(i / 3600) + ":" + str(i % 3600 / 60) + ":" + str(i % 60)

    ffmpeg.cut(input_file, delimit_points)

    plt.figure()
    # Single-point plots that widen the y-axis range of the figure.
    plt.plot(-0.2)
    plt.plot(1.2)
    if model == "":
        plt.plot(songs_20160203c, 'b')
    plt.plot(Y, 'r')
    plt.show()
Example #6
    def get_frame(self):
        success, image = self.video.read()
        if not success:
            # The capture device returned no frame.
            return None, b'', None

        # We are using Motion JPEG, but OpenCV defaults to capturing raw
        # images, so we must encode into JPEG in order to correctly display
        # the video stream.
        self.preds = predict.run(image)

        image_preds = image.copy()

        print(self.current_exercise, self.preds)

        if self.current_exercise == self.preds:
            text = self.preds
        else:
            text = "Detecting..."

        _h, _w, _c = image_preds.shape
        # cv2.putText expects an (x, y) origin, i.e. (width, height) order.
        cv2.putText(image_preds, text, (_w // 2 + 2, _h // 2 + 2),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2, cv2.LINE_AA)
        _, jpeg_preds = cv2.imencode('.jpg', image_preds)

        return jpeg_preds, jpeg_preds.tobytes(), self.preds
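A hedged sketch of how a get_frame() like the one above is typically consumed, using the standard Flask MJPEG streaming pattern; the route name and the VideoCamera class are assumptions, not part of the snippet:

from flask import Flask, Response

app = Flask(__name__)

def gen(camera):
    # Yield each JPEG frame as one part of a multipart MJPEG stream.
    while True:
        _, frame_bytes, _ = camera.get_frame()
        if not frame_bytes:
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame_bytes + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')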
Example #7
def test_predict(model_endpoint_id: str) -> None:
    predictions = predict.run(
        project=PROJECT,
        region=REGION,
        model_endpoint_id=model_endpoint_id,
        image_file="animals/0036/0072.jpg",  # tapirus indicus
    )
    assert len(predictions) > 0, f"predictions: {repr(predictions)}"
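For context, one plausible shape of the predict.run under test: a minimal sketch using the Vertex AI SDK, assuming an image endpoint that accepts base64 content instances. This is an illustration, not the project's actual implementation:

import base64
from google.cloud import aiplatform

def run(project, region, model_endpoint_id, image_file):
    # Connect to the deployed endpoint in the given project/region.
    aiplatform.init(project=project, location=region)
    endpoint = aiplatform.Endpoint(model_endpoint_id)

    # Image models commonly take base64-encoded bytes as the instance content.
    with open(image_file, "rb") as f:
        content = base64.b64encode(f.read()).decode("utf-8")

    response = endpoint.predict(instances=[{"content": content}])
    return response.predictions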
Example #8
def index():
    if request.method == 'POST':
        review = request.form.get("input")
        predict_result = predict.run(review)
        return render_template('home.html',
                               input=review,
                               print_result=predict_result)
    return render_template('home.html')
Example #9
def main():
    args = parse_args()
    if args.config_file is None:
        raise Exception('no configuration file!')

    config = utils.config_parser.load(args.config_file)

    config.publish = args.publish
    config.dev = args.dev
    config.vote = args.vote

    if config.mode == "TRA":
        train.run(config)
    elif config.mode == "PRD":
        predict.run(config)
Example #10
def pred():
    PER_mess, LOC_mess, ORG_mess = predict.run(request.json, True)
    return jsonify({
        'PER': list(PER_mess.values()),
        'LOC': list(LOC_mess.values()),
        'ORG': list(ORG_mess.values()),
    })
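A hedged usage sketch for the endpoint above, assuming it is registered on a Flask app named `app` under a /pred route (both names are placeholders):

with app.test_client() as client:
    resp = client.post('/pred',
                       json="Angela visited Paris for the Siemens meeting.")
    print(resp.get_json())  # e.g. {'PER': [...], 'LOC': [...], 'ORG': [...]}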
Example #11
def run(down_station, input_list, include_time, sample_size, network_type,
        nr_layers, nr_units, nr_epochs):
    """Runner"""
    start_time_run = time.time()

    result_dir = util.get_result_dir(down_station, network_type, nr_layers,
                                     nr_units, sample_size)
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    # down_station, input_list, include_time, sample_size, network_type
    (y_train, x_train, y_cv, x_cv, _, _, _, _, _, _, _, _, _, _,
     _) = data.construct(down_station, input_list, include_time, sample_size,
                         network_type)

    input_dim = 0
    input_dim_2 = 0
    if network_type in ('bnn', 'cnn', 'rnn_lstm', 'rnn_gru'):
        (_, input_dim, input_dim_2) = x_train.shape
    elif network_type == 'multi_cnn':
        input_dim = []
        for x_train_i in x_train:
            (_, input_dim_i, _) = x_train_i.shape
            input_dim.append(input_dim_i)
    else:
        (_, input_dim) = x_train.shape

    my_model = model.create(result_dir, input_dim, nr_layers, nr_units,
                            network_type, input_dim_2)
    train(my_model, result_dir, y_train, x_train, y_cv, x_cv, nr_epochs)

    util.plot_training_performance(result_dir)
    predict.run(down_station, input_list, include_time, sample_size,
                network_type, nr_layers, nr_units)

    elapsed_time_run = time.time() - start_time_run
    print(
        time.strftime("Run time : %H:%M:%S",
                      time.gmtime(elapsed_time_run)))
Example #12
def handleDifference(before, current, typeR="daily"):
    db = connect2DB()
    if before is not None and current is not None:
        print "Before " + before[0]['date'].ctime()
        print "Current " + current[0]['date'].ctime()
        if before[0]['date'].ctime() != current[0]['date'].ctime():
            for b in before:
                for c in current:
                    if b['commodity'] == c['commodity']:
                        if typeR == "daily":
                            if abs(b['price'] - c['price']) > MIN_DIFF:
                                print "price for ", b['commodity'], " changed"
                                # Add new record to the general dataset
                                # updateGeneralDataSet(c, b, typeR)
                                # Send Push notification of change record
                                change = "increased"
                                if b['price'] >= c['price']:
                                    change = "decreased"
                                message = c[
                                    'commodity'] + " has " + change + " to $" + str(
                                        c['price']) + " per " + c['unit']
                                name = b['commodity'].replace(" ", "")
                                idx = name.find("(")
                                fcm.notify(message, name)
                            else:

                                print "price for ", b[
                                    'commodity'], " remained the same"
                            pred = predict.run(c['commodity'])
                            if pred != -1:
                                newRec = {
                                    "name": c['commodity'],
                                    "price": pred
                                }
                                db.predictions.insert_one(newRec)
                            # breaktypeR

            if typeR == "daily":
                fetcher.storeMostRecentDaily(db, current)
                fetcher.storeDaily(db, current)
                fire.sendFire(current)
                fire.sendRecent(current)
            if typeR == "monthly":
                print current
                fetcher.storeMostRecentMonthly(db, current)
                fetcher.storeMonthly(db, current)
        else:
            print "no new record found"
    else:
        print "Doesn't exist"
Example #13
def run(username, url):
    print("----------------------------------------------------------------")
    parser = argparse.ArgumentParser(
        description="where to store the rec after conversion")
    parser.add_argument("--str_path", type=str,
                        default="./audio_storage/collection",
                        help="where would we store the good stuff?")
    parser.add_argument("--test_path", type=str,
                        default='./audio_storage/test',
                        help="path to test folder")
    parser.add_argument("--test_out", type=str,
                        default='./audio_storage/testout',
                        help="path to test output folder")
    args, _ = parser.parse_known_args()
    path_out = load_test_data(username, url, args)
    print(path_out + "/" + username)
    try:
        ans = predict.run(path_out + "/" + username)
    except Exception as e:
        # Re-raise so callers don't hit a NameError on `ans` below.
        print("error in pred: %s" % e)
        raise
    print(ans)
    return ans['2'], username
Example #14
def run_predict() -> dict:
    import predict

    try:
        args = flask.request.get_json() or {}
        bucket = args["bucket"]
        model_dir = f"gs://{bucket}/model_output"
        data = args["data"]
        predictions = predict.run(data, model_dir)

        return {
            "method": "predict",
            "model_dir": model_dir,
            "predictions": predictions,
        }
    except Exception as e:
        return {"error": f"{type(e).__name__}: {e}"}
Example #15
def handleDifference(before, current, typeR="daily"):
    db = connect2DB()
    if before is not None and current is not None:
        print "Before " + before[0]["date"].ctime()
        print "Current " + current[0]["date"].ctime()
        if before[0]["date"].ctime() != current[0]["date"].ctime():
            for b in before:
                for c in current:
                    if b["commodity"] == c["commodity"]:
                        if typeR == "daily":
                            if abs(b["price"] - c["price"]) > MIN_DIFF:
                                print "price for ", b["commodity"], " changed"
                                # Add new record to the general dataset
                                # updateGeneralDataSet(c, b, typeR)
                                # Send Push notification of change record
                                change = "increased"
                                if b["price"] >= c["price"]:
                                    change = "decreased"
                                message = (
                                    c["commodity"] + " has " + change + " to $" + str(c["price"]) + " per " + c["unit"]
                                )
                                name = b["commodity"].replace(" ", "")
                                idx = name.find("(")
                                Push.message(message, channels=[name[0:idx]])
                            else:
                                print "price for ", b["commodity"], " remained the same"
                            pred = predict.run(c["commodity"])
                            if pred != -1:
                                newRec = {"name": c["commodity"], "price": pred}
                                db.predictions.insert(newRec)

            if typeR == "daily":
                fetcher.storeMostRecentDaily(db, current)
                fetcher.storeDaily(db, current)
            if typeR == "monthly":
                fetcher.storeMostRecentMonthly(db, current)
                fetcher.storeMonthly(db, current)
        else:
            print "no new record found"
    else:
        print "Doesn't exist"
Example #16
def test_e2e_local() -> None:
    with tempfile.TemporaryDirectory() as temp_dir:
        train_data_dir = os.path.join(temp_dir, "datasets", "train")
        eval_data_dir = os.path.join(temp_dir, "datasets", "eval")
        model_dir = os.path.join(temp_dir, "model")
        tensorboard_dir = os.path.join(temp_dir, "tensorboard")
        checkpoint_dir = os.path.join(temp_dir, "checkpoints")

        # Create the dataset TFRecord files.
        create_datasets.run(
            raw_data_dir="test_data",
            raw_labels_dir="test_data",
            train_data_dir=train_data_dir,
            eval_data_dir=eval_data_dir,
            train_eval_split=[80, 20],
        )
        assert os.listdir(train_data_dir), "no training files found"
        assert os.listdir(eval_data_dir), "no evaluation files found"

        # Train the model and save it.
        trainer.run(
            train_data_dir=train_data_dir,
            eval_data_dir=eval_data_dir,
            model_dir=model_dir,
            tensorboard_dir=tensorboard_dir,
            checkpoint_dir=checkpoint_dir,
            train_epochs=2,
            batch_size=8,
        )
        assert os.listdir(model_dir), "no model files found"
        assert os.listdir(tensorboard_dir), "no tensorboard files found"
        assert os.listdir(checkpoint_dir), "no checkpoint files found"

        # Load the trained model and make a prediction.
        with open("test_data/56980685061237.npz", "rb") as f:
            input_data = pd.DataFrame(np.load(f)["x"])
        predictions = predict.run(model_dir, input_data.to_dict("list"))

        # Check that we get non-empty predictions.
        assert "is_fishing" in predictions
        assert len(predictions["is_fishing"]) > 0
def run_predict() -> dict:
    import predict

    try:
        args = flask.request.get_json() or {}
        params = {
            "model_dir": args.get("model_dir", f"{TRAINING_DIR}/model"),
            "inputs": args["inputs"],
        }
        predictions = predict.run(**params)

        # Convert the numpy arrays to Python lists to make them JSON-encodable.
        return {
            "method": "predict",
            "model_dir": params["model_dir"],
            "input_shapes": {
                name: np.shape(values)
                for name, values in params["inputs"].items()
            },
            "predictions": predictions,
        }
    except Exception as e:
        return {"error": f"{type(e).__name__}: {e}"}
Example #18
def test(FID, show_price_pairs=default_show_price_pairs, retrain=True):

    dataset, price_mmr_state_condition_vin = get_dataset(
        get_filename(FID),
        remove_initial_outliers=False,
        extra_continuous_exclusions=EXTRA_CONTINUOUS_EXCLUSIONS,
        extra_categorical_exclusions=EXTRA_CATEGORICAL_EXCLUSIONS,
        expand_odometer=True,
        capture_condition=True,
        capture_state=True)

    vec = DictVectorizer()
    vectors = vec.fit_transform(dataset).toarray()
    features = vec.get_feature_names()
    # print features

    # create a testing set
    X_train, X_test, y_train, y_test = train_test_split(
        vectors, price_mmr_state_condition_vin, test_size=0.3)

    price_train = [p[0] for p in y_train]

    ###############################################################################

    # IDEAL
    # n_samples > n_features ** 2
    sample_size = len(X_train)
    min_sample_size = len(features) ** 2
    print 'training set: %s samples, %s features^2' % (sample_size, min_sample_size)

    # get the best classifier for the original full set of data
    best_clf = train(X_train, price_train)
    # predict targets on the same set of data
    predicted_y, _, _ = predict.run(best_clf, X_train, y_train, show_prices=False)

    if retrain:
        # remove the worst performers and consider them outliers
        X_train, y_train = filter_worst(X_train, y_train, predicted_y)
        price_train = [p[0] for p in y_train]
        # retrain the model
        best_clf = train(X_train, price_train)
        sample_size = len(X_train)
        min_sample_size = len(features) ** 2
        print '\ntraining set (outliers removed): %s samples, %s features^2' % (sample_size, min_sample_size)

    if sample_size > min_sample_size:
        print 'sufficient sample to feature ratio'
    else:
        print 'WARN - insufficient sample to feature ratio to be highly confident of fit'

    # n_samples > n_features ** 2

    price_test = [p[0] for p in y_test]
    # scores = cross_val_score(best_clf, vectors, selling_prices, cv=5, scoring='r2')
    scores = cross_val_score(best_clf, X_test, price_test, cv=5, scoring='r2')

    print 'Grid Search Results'
    print "Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2)

    _, score, explained_variance = predict.run(best_clf,
        X_test, y_test, offset_state=False, offset_condition=False, show_prices=show_price_pairs)

    print 'held out test sample results:'
    print 'test r^2 score: %s' % score
    print 'test explained variance: %s' % explained_variance
    print '\n'
Example #19
# -*- coding: utf-8 -*-
# @Time  : 2020/2/16 15:19
# @Author :
# @Desc : ==============================================
# If this runs wrong, don't ask me, I don't know why;  ===
# If this runs right, thank god, and I don't know why. ===
# ======================================================
# @Project : LSTM_CRF_IE
# @FileName: main.py.py
# @Software: PyCharm
import argparse
import train
import predict

parser = argparse.ArgumentParser(description='BiLSTM-CRF for Chinese NER task')
parser.add_argument('--mode',
                    type=str,
                    default='predict',
                    help='train/test/predict')
args = parser.parse_args()

if args.mode == 'predict':
    predict.run()
else:
    train.run(args.mode)
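Hedged invocation examples for the dispatcher above:

python main.py --mode=train
python main.py --mode=predict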
Example #20
        help=
        "list of watchwords to look for in tweets unique to the target group")
    parser.add_argument(
        "--pool_worker_scaling",
        help=
        "multiplied against cpu_count to determine number of worker threads",
        default=1.0,
        type=float)
    parser.add_argument(
        "--tweet_fetch_limit",
        help="number of tweets to fetch when calculating statistics",
        default=100,
        type=int)

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--userlist',
                       help="csv containing usernames to fetch data on")
    group.add_argument('--username',
                       help="username of single user to fetch data on")

    args = parser.parse_args()

    return args


if __name__ == "__main__":
    ARGS = fetch_args()
    ARGS = massage(ARGS)
    gather_data.run(ARGS)
    predict.run(ARGS)
Example #21
    def rrun(self, img_path, *_):
        df, ans, img = run(img_path)
        return df, ans, img
Example #22
File: run.py Project: ljx02/NER
def pred():
    PER, LOC, ORG = predict.run(request.json)
    return jsonify({'PER': PER, 'LOC': LOC, 'ORG': ORG})
Example #23
from argparse import ArgumentParser

from predict import run

if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("config_files",
                        type=str,
                        nargs="+",
                        help="Configuration files. See examples in configs/")
    args = parser.parse_args()
    print("Run multiple predictions")
    for config_file in args.config_files:
        try:
            print("\n\n----- run {} -----\n".format(config_file))
            run(config_file)
        except Exception as e:
            print("\n\n !!! Run {} failed !!!\n".format(config_file))
            print("\n{}".format(e))
Example #24
def predict():
    img = request.form['image']
    return run(img)