Code example #1
@classmethod
def generate_categories(cls, kana_title: str) -> list:
    categories = [
        predict(kana_title, "category_1"),
        predict(kana_title, "category_2"),
        predict(kana_title, "category_3"),
    ]
    return categories
Code example #2
import os
from os import listdir
from os.path import isfile, join

import torch
from tqdm import tqdm

# SiameseNet and predict are project-local; a typical device setup:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def prediction():
    list_dirs = ['exp384-ch3-t1', 'exp384-ch3-t4']
    # list_dirs = ['exp768-ch1-t1', 'exp768-ch3-t1']

    for checkpoint_dir in list_dirs:
        mypath = f'../DATA/humpback_whale_siamese_torch/checkpoints/{checkpoint_dir}/'
        files = [f for f in listdir(mypath) if isfile(join(mypath, f))]
        files.sort()

        submit_dir = f'../DATA/humpback_whale_siamese_torch/submissions/{checkpoint_dir}/'
        score_dir = f'../DATA/humpback_whale_siamese_torch/scores/{checkpoint_dir}/'
        os.makedirs(submit_dir, exist_ok=True)
        os.makedirs(score_dir, exist_ok=True)

        for file in tqdm(files, total=len(files)):
            sub_file = join(submit_dir, file.replace('.pt', '.csv'))
            score_file = join(score_dir, file)
            if isfile(score_file):
                continue
            print(file)

            checkpoint = torch.load(mypath + file)
            model = SiameseNet(checkpoint['channel'], checkpoint['features_size'])
            model.load_state_dict(checkpoint['state_dict'])
            model.to(device)
            predict(model, device, 16, sub_file, score_file, checkpoint['norm_zero_one'])
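For context, the torch.load() above implies checkpoints saved with a dict layout like the following sketch; the key names are inferred from this snippet alone, so the project's real training loop may differ:

# Hypothetical save side matching the keys read by prediction() above.
torch.save({
    'state_dict': model.state_dict(),
    'channel': channel,              # e.g. 1 or 3, per the exp*-ch* directory names
    'features_size': features_size,  # e.g. 384 or 768
    'norm_zero_one': norm_zero_one,  # whether inputs were scaled to [0, 1]
}, join(mypath, 'epoch_001.pt'))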
Code example #3
import argparse

import torch
import torch.nn as nn

# get_dataloaders, WordCNN, trainer, and predict are project-local helpers.
def tune(lr=0.1, dropout=0.3, kernel_num=100, kernel_sizes='3,4,5', embed_dim=100):
    parser = argparse.ArgumentParser()
    parser.add_argument("--lr", type=float, default=lr)
    parser.add_argument("--dropout", type=float, default=dropout)
    parser.add_argument("--kernel_num", type=int, default=kernel_num)
    parser.add_argument("--kernel_sizes", type=str, default=kernel_sizes)
    parser.add_argument("--batch_size", type=int, default=16)
    parser.add_argument("--early_stop", type=int, default=10)
    parser.add_argument("--embed_dim", type=int, default=embed_dim)
    parser.add_argument("--max_len", type=int, default=200)
    parser.add_argument("--class_num", type=int, default=3)
    parser.add_argument("--lr_decay", type=float, default=0.5)
    args = parser.parse_args()

    # print("lr", args.lr, "dropout", args.dropout, "kernel_num", args.kernel_num, "kernel_sizes",args.kernel_sizes, "batch_size", args.batch_size, "early_stop", args.early_stop, "embed_dim", args.embed_dim, "max_len", args.max_len, "class_num", args.class_num, "lr_decay", args.lr_decay)
    train_loader, dev_loader, test_loader, vocab_size = get_dataloaders(args.batch_size, args.max_len)
    model = WordCNN(args, vocab_size, embedding_matrix=None)
    # loss function
    criterion = nn.CrossEntropyLoss()
    # choose optimizer
    optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)

    # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=args.lr_decay)
    model, best_acc = trainer(train_loader, dev_loader, model, optimizer, criterion, early_stop=args.early_stop)

    print('best_dev_acc:{}'.format(best_acc))
    predict(model, test_loader)
    print("This is args", args)
Code example #4
File: config.py Project: deepmipt/stand_squad_en
def main(_):
    config = flags.FLAGS
    if config.mode == "train":
        train(config)
    elif config.mode == "prepro":
        copyfile(_[0], dir + 'config.py')  # assumes a module-level `dir` path string (shadows the builtin)
        prepro(config)
    elif config.mode == "debug":
        config.num_steps = 2
        config.val_num_batches = 1
        config.checkpoint = 1
        config.period = 1
        train(config)
    elif config.mode == "test":
        if config.use_cudnn:
            print(
                "Warning: Due to a known bug in Tensorlfow, the parameters of CudnnGRU may not be properly restored."
            )
        test(config)
    elif config.mode == "predict":
        if config.use_cudnn:
            print(
                "Warning: Due to a known bug in Tensorlfow, the parameters of CudnnGRU may not be properly restored."
            )
        predict(config)
    elif config.mode == "test_sber":
        if config.use_cudnn:
            print(
                "Warning: Due to a known bug in Tensorlfow, the parameters of CudnnGRU may not be properly restored."
            )
        test_sber(config)
    else:
        print("Unknown mode")
        exit(0)
Code example #5
import logging

def run():
    """
    Prepares and runs the whole system.
    """
    args = parse_args()

    logger = logging.getLogger("brc")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    if args.log_path:
        file_handler = logging.FileHandler(args.log_path)
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    else:
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.INFO)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)

    # string = ",".join([g_num for g_num in args.gpus])
    # os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    # os.environ["CUDA_VISIBLE_DEVICES"] = string
    logger.info('Running with args : {}'.format(args))
    if args.prepare:
        prepare(args)
    if args.train:
        train(args)
    if args.evaluate:
        evaluate(args)
    if args.predict:
        predict(args)
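run() assumes a parse_args() exposing at least the attributes read above; a minimal sketch consistent with this snippet (flag names inferred here, not taken from the project) might be:

import argparse

def parse_args():
    # Only the attributes run() actually touches are declared here.
    parser = argparse.ArgumentParser()
    parser.add_argument('--prepare', action='store_true')
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--evaluate', action='store_true')
    parser.add_argument('--predict', action='store_true')
    parser.add_argument('--log_path', help='log to this file instead of the console')
    return parser.parse_args()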
Code example #6
File: test.py Project: ryan147k/NER-pytorch
import json

import demjson

# keywords_split and predict are project-local helpers.
def main():
    with open(r'D:\Project File\数据融合\中国上市公司meta数据.jsonl', 'r', encoding='utf-8') as f:
        lines = f.readlines()
    for line in lines:
        dic = demjson.decode(line)
        name = dic['compName']
        keywords = dic['keywords']
        description = dic['description']

        res = {
            "name": name,
            "alias": []
        }

        alias_list1, alias_list2 = [], []
        if keywords != '':
            words = keywords_split(keywords)
            # predict(words)
            for word in words:
                if word != '':
                    tag_list, _ = predict(word)
                    if 'B-Com' in tag_list and 'I-Com' in tag_list:
                        alias_list1.append(word)
        if description != '':
            _, alias_list2 = predict(description)

        alias_list = list(set(alias_list1 + alias_list2))
        for alias in alias_list:
            res['alias'].append(alias)

        with open('./alias.jsonl', 'a', encoding='utf-8') as fout:
            fout.write('{}\n'.format(json.dumps(res, ensure_ascii=False)))
Code example #7
import os

# predict is a project-local helper.
def predict_all(load_path, dir_path, save_dir_path):
    os.makedirs(save_dir_path, exist_ok=True)  # ensure the output directory exists
    _list = os.walk(dir_path)
    for root, _, files in _list:
        for file in files:
            file_path = os.path.join(root, file)
            print('current file: ', file_path)
            save_path = os.path.join(save_dir_path, file)
            if not os.path.exists(save_path):
                predict(load_path=load_path,
                        file_path=file_path,
                        save_path=save_path)
Code example #8
File: test.py Project: ashrafbeshtawi/Neuralnetwork
import numpy as np

def test_predict_1():
    input = np.array([8, 9])
    w = [np.array([[1, 2], [3, 4], [5, 6]])]
    b = [np.array([[1, 2, 3]])]
    result = predict(input, w, b, Relu, sig)
    print("correct", np.add(np.dot(w, input), b))
    print("result", result[1])
Code example #9
    def main(self, query):
        # print("My name is Chatterbot and I'm a chatbot. If you want to exit, type Bye!")

        user_response = query.lower()
        print(user_response)

        # user_intent = self.intentClassifier.predict(user_response)
        # print("intent is: %s" % (user_intent))

        prediction = predict([query])
        user_intent = prediction[0][1]
        search_features = extract_features(prediction[0][0], prediction[0][1],
                                           prediction[0][2])
        # print(pprint([(X.text, X.label_) for X in search_features]))

        if search_features:
            self.infoExtractor.extractSearchParams(search_features)
            self.resp = self.complexResponse()
        elif user_intent:
            self.resp = self.simpleResponse(user_intent)
        else:
            self.resp = self.initiatorResponse()

        print("---------------" + self.resp[0])
        if self.reset:
            self.reset = None
            self.infoExtractor.resetParams()
        return self.resp
Code example #10
import logging
import time
from datetime import datetime, timedelta

# get_future_data, buildGameData, and predict are project-local helpers.
def start_prediction(num_days, classifier):
    logging.info("********************************* Running Initial Prediction: {} *****************************************".format(datetime.now()))

    # This data is saved in "/home/ubuntu/seedion/cron_predictor.log"
    print("********** Running Initial Prediction: ", datetime.now(), " **********")

    date = datetime.now()
    future_date = date + timedelta(days=num_days)

    start_time = time.time()
    data, teams_data = get_future_data(date, future_date)

    games_data = []
    for index, game in data.iterrows():
        game_data = buildGameData(game, teams_data)
        games_data.append(game_data)

    predictions = predict(games_data, classifier, False)

    end_time = time.time()
    logging.info("Elapsed: {}".format(end_time-start_time))

    # This data is saved in "/home/ubuntu/seedion/cron_predictor.log"
    print("********** Finished Initial Prediction **********")

    logging.info("********************************* Finished Running Initial Prediction ********************************")
Code example #11
from flask import render_template, request

def predictMe():
    result = predict(request.form['text'], request.form['name'],
                     request.form['email'],
                     request.form['phone'])  #can remove name email phone
    # test = json.load(result)
    return render_template('predict.html', pred=result)
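For reference, this route can be exercised with a form-encoded POST; the URL and field values below are illustrative only:

import requests

# Hypothetical host/route; adjust to wherever predictMe() is actually mounted.
resp = requests.post('http://localhost:5000/predictMe', data={
    'text': 'some input text',
    'name': 'Jane Doe',
    'email': 'jane@example.com',
    'phone': '555-0100',
})
print(resp.status_code)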
Code example #12
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score

# train and predict are project-local helpers.
def main():
    train_file = 'data/train.csv'
    test_file = 'data/test.csv'
    label_file = 'data/label.csv'
    # create dataset
    test_label = list()
    with open(label_file) as f:
        for line in f.readlines():
            line = line.strip()
            test_label += [float(line)]

    cluster_number, top_sql = train(train_file, 'data/', 2000, 40)
    print('Best cluster number is: ' + str(cluster_number))
    print('Typical SQL template is: ')
    print(top_sql)
    result = predict(test_file, 'data/', 0.1)

    # plot
    x = range(len(result))
    scores = r2_score(test_label, result, multioutput='variance_weighted')
    plt.scatter(x, test_label, marker='o', label='actual value')
    plt.scatter(x, result, marker='*', label='predicted value')
    plt.title("acc: " + str(scores * 100))
    plt.legend()
    plt.show()
Code example #13
File: base.py Project: iamthem/Ultrastat
def drawTable():
    # self and frame1 come from the enclosing view in the original file.
    df = main.predict('bundes', 15)

    self.table = pt = Table(frame1, dataframe=df,
                            showtoolbar=False, showstatusbar=True)

    pt.show()
Code example #14
import cv2

# w (the model holder), predict, and iter are project-local here
# (the builtin iter would reject integer arguments).
def scan_button(num_of_scans):
    cap = cv2.VideoCapture(0)
    return_value, image = cap.read()
    cv2.imwrite('scan' + str(num_of_scans) + '.jpg', image)
    prediction = predict('scan' + str(num_of_scans) + '.jpg', w.model_dict)
    print(prediction)
    iter(num_of_scans, 1)
    w.last_prediction = prediction
Code example #15
def predict(self, pickled_picture, weight, height):
    picture = pickle.loads(pickled_picture)
    verts, adjusted_weight, adjusted_height = main.predict(
        picture, weight, height)
    measurements = output_measurements(verts)
    return pickle.dumps(
        (measurements, verts, adjusted_weight, adjusted_height),
        protocol=0)
Code example #16
from flask import jsonify, request

def index():
    plaintext = request.args.get("text")

    # While debugging, change things here first; once this returns correctly, apply the same change in bspredict
    # To debug, open " 127.0.0.1:5001/?text=<text to summarize> " in a browser and check whether a result comes back

    summary = predict(plaintext)

    return jsonify({"summary": summary})
Code example #17
    def update(self):
        ret, frame = self.vid.get_frame()

        if ret:
            predicted_img = main.predict(frame)
            self.photo = ImageTk.PhotoImage(image=Image.fromarray(predicted_img))
            self.canvas.create_image(0, 0, image=self.photo, anchor=tkinter.NW)

        self.window.after(self.delay, self.update)
Code example #18
File: server.py Project: alokrkmv/revenue_predictor
import ast
import decimal
import json

from flask import Response, request

# predict is the project's model entry point.
def predictor():
    req_data = request.json
    req_data = ast.literal_eval(req_data)
    resp_data = predict(req_data)
    decimal.getcontext().rounding = decimal.ROUND_DOWN
    c = decimal.Decimal(resp_data)
    resp_data = float(round(c,4))
    resp_data = json.dumps({"res":resp_data})
    resp = Response(resp_data, status=200, mimetype='application/json')
    return resp
Code example #19
File: service.py Project: bohrlafette/ml
from flask import jsonify, request

# `engine` is the project's prediction engine.
def predict():
    payload = request.json  # avoid shadowing the json module

    predictionUser = engine.user(payload.get("name"), payload.get("gender"),
                                 payload.get("numChildren", 0),
                                 payload.get("ownsHouse", 0),
                                 payload.get("yearBorn"), payload.get("numCats", 0),
                                 payload.get("numDogs", 0),
                                 payload.get("numHorses", 0))
    return jsonify(engine.predict(predictionUser))
Code example #20
from flask import jsonify, request

# allowed_file and main.predict are project-local.
def upload_file():
    # check if the post request has the file part
    if 'file' not in request.files or request.files['file'].filename == '':
        return jsonify([{"message": "No files found"}])

    file = request.files['file']
    if file and allowed_file(file.filename):
        results = main.predict(file)
        return jsonify(results)

    return jsonify([{"message": "Something went wrong"}])
Code example #21
import json

import numpy as np

# ut, load_json, and predict are project-local helpers.
def main(test_file, model_path, output_file):
    x_test, y_test, x_control_test = load_json(test_file)

    # X = ut.add_intercept(X) # add intercept to X before applying the linear classifier
    x_test = ut.add_intercept(x_test)

    w = np.load(model_path)

    predictions = predict(w, x_test).tolist()
    with open(output_file, "w") as f:
        json.dump(predictions, f)
Code example #22
File: app.py Project: vaomaohaoone/qweqweasdasd
import base64
from datetime import datetime

import jwt
from flask import request

# `app` is the project's Flask application; predict is project-local.
def load_and_predict():
    token = request.headers.get('Authorization')
    time = datetime.today().timestamp()
    payload = jwt.decode(token[7:], base64.b64decode(app.config.get('SECRET_KEY')), algorithms='HS256')
    if payload['sub'] == app.config['user'] and payload['token_type'] == 'access_token' and payload['roles'] == app.config['role']:
        if request.method == 'POST':
            some_json = request.get_json()
            result = predict(some_json)
            return 'predict response: {}'.format(result)
        else:
            return payload
    else:
        return "invalid token"
Code example #23
import random

import numpy as np

# m (the model module), basepath, and the confusion-matrix helpers are project-local.
def crossValidation(k):
    data, labels = m.getFrameMats('data/ucf-101/')
    label_data = list(zip(data, labels))
    random.shuffle(label_data)
    data, labels = list(zip(*label_data))
    fold_size = len(data) // k

    overall_conf_mat = np.zeros((m.CLASS_SIZE, m.CLASS_SIZE, 1))

    for i in range(0, k):
        model_path = basepath + "_fold" + str(i)

        end_offset = 1 if i < (k - 1) else 0
        left_data = data[0:fold_size * i]
        left_labels = labels[0:fold_size * i]
        right_data = data[fold_size * (i + 1) + end_offset:]
        right_labels = labels[fold_size * (i + 1) + end_offset:]

        train_data = []
        train_labels = []
        train_data.extend(left_data)
        train_data.extend(right_data)
        train_labels.extend(left_labels)
        train_labels.extend(right_labels)

        eval_data = []
        eval_labels = []
        eval_data.extend(data[fold_size * i:fold_size * (i + 1)])
        eval_labels.extend(labels[fold_size * i:fold_size * (i + 1)])

        train_data_arr = np.asarray(train_data)
        train_labels_arr = np.asarray(train_labels)
        eval_data_arr = np.asarray(eval_data)
        eval_labels_arr = np.asarray(eval_labels)

        m.train(train_data_arr, train_labels_arr, model_path)
        results = m.predict(eval_data_arr, eval_labels_arr, model_path)

        predicted_labels = []

        for result in results:
            predicted_labels.append(result['classes'])

        conf_mat = generateConfusionMatrix(eval_labels, predicted_labels)
        overall_conf_mat = np.add(overall_conf_mat, conf_mat)

        print('Fold: ' + str(i))

    averaged_conf_mat = np.divide(np.float32(overall_conf_mat), k)
    outputConfusionMatrix(averaged_conf_mat)
    generatePerfMeasures(averaged_conf_mat)
Code example #24
File: server.py Project: axelniklasson/HelpER
from flask import jsonify, request

def post_index():
    temp = request.get_json()
    pain = int(temp['pain'])
    category = int(temp['category'])
    waitingTime, queue = predict(category, pain)
    waitingTime = waitingTime.tolist()  # convert numpy arrays so jsonify can serialize them
    queue = queue.tolist()

    return jsonify(waitingTime0=waitingTime[0],
                   waitingTime1=waitingTime[1],
                   waitingTime2=waitingTime[2],
                   queue0=queue[0],
                   queue1=queue[1],
                   queue2=queue[2])
Code example #25
async def prepareTracks():
    playlistIds = await browseToplists()
    for playlist in playlistIds:
        tracks = await playlistTracks(playlist)
        for track in tracks:
            # Omit null preview url.
            if track.preview is None:
                print(track, "missing preview")
                continue
            print(track)
            if predict(track.title) == "Relaxing":
                relaxingTracks.append(track)
            else:
                nonRelaxingTracks.append(track)
Code example #26
def test_predict_future_reward(self):
    """When predicting future rewards, we want to see the network give correct directions"""
    good_sequence = [
        ([0,0,0,0],1,[0,0,0,1]),
        ([0,0,0,1],0,[1,0,1,0]),
        ([1,0,1,0],1,[1,1,1,1]),
    ]
    bad_sequence = [
        ([0,0,0,0],0,[1,0,0,1]),
        ([1,0,0,1],1,[0,0,1,0]),
        ([0,0,1,0],1,[0,1,1,1]),
    ]
    def expand(r, final_reward):
        results = []
        for i, (state, action, new_state) in enumerate(r):
            record = {
                'state': np.array(state, 'f'),
                'new_state': np.array(new_state, 'f'),
                'action': action,
                'done': i == len(r) - 1,  # only the final step of the episode is terminal
                'reward': final_reward
            }
            results.append(record)
        assert results[-1]['reward'] == final_reward
        return results
    records = expand(good_sequence, 1.0) + expand(bad_sequence, -1.0)
    print(records)
    records = records * 256
    model = main.build_model(env)
    main.train_model(model, records, env, batch_size=8)
    for (state, action, new_state) in good_sequence:
        prediction = main.predict(model, state)
        assert np.argmax(prediction) == action, (state, action, prediction)

    for (state, action, new_state) in bad_sequence:
        prediction = main.predict(model, state)
        assert np.argmax(prediction) != action, (state, action, prediction)
Code example #27
File: helper.py Project: hackoose/cfi-team30
import datetime

import numpy as np

def learn_and_predict(dates, attendance, date_predict):
    date = []
    for i in dates:
        date.append([int(datetime.datetime.strptime(str(i), "%Y-%m-%d").strftime("%u"))])

    Y = []
    for i in attendance:
        Y.append([int(i)])

    X = np.asarray(date)
    y = np.asarray(Y)
    dt = [int(datetime.datetime.strptime(str(date_predict), "%Y-%m-%d").strftime("%u"))]
    pr = np.asarray([dt])

    return predict(X, y, pr)
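Since learn_and_predict() reduces every date to its ISO weekday (%u), a call looks like this; the dates and attendance figures are invented for illustration:

dates = ['2023-01-02', '2023-01-03', '2023-01-04']   # Mon, Tue, Wed
attendance = [120, 95, 110]
print(learn_and_predict(dates, attendance, '2023-01-09'))  # predict the next Monday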
Code example #28
from flask import request

# randomString() and predict() are project-local helpers.
def predict_taxonomy():
    try:
        if request.method == 'POST':
            f = request.files['file']
            random_file_name = 'received_files/' + randomString() + '.pdf'
            f.save(random_file_name)
            result = predict(random_file_name)
            print('result', result)
            return result
        else:
            # Flask cannot return a bare bool; send an explicit error response
            return 'Only POST is supported', 405

    except Exception as e:
        print(e)
        # likewise, return text and a status code rather than the exception object
        return str(e), 500
Code example #29
File: server.py Project: pieromarini/PyNeuNet
import pickle

from flask import jsonify, request

def digit():
    # Take the image and preprocess it.
    # predict and get the label
    # send the label as json to the template.
    if request.method == 'POST':
        img = request.get_json()

        # Load trained network file.
        with open('./trained-network', 'rb') as f:
            weights = pickle.load(f)

        digit, prob = predict(img, weights)
        prob = "{0:.2f}".format(prob.item())

        data = {'digit': digit.item(), 'prob': prob}
        return jsonify(data)
Code example #30
import pandas as pd
from flask import render_template, request

def result():
    """
    Function for returning the result obtained from main.py
    """
    if request.method == 'POST':
        result = request.form
        ## Converting the input data to a dictionary ##
        ## (`data` is a module-level dict in the original project) ##
        for key, value in result.items():
            data.update({key: [value]})
        ## Converting dictionary to a dataframe ##
        df = pd.DataFrame.from_dict(data)
        df.insert(0, "id", 419)
        ## Declaring a variable for the output received from the predict function ##
        predicted = predict(df)
        ## Render the result.html file ##
        return render_template("result.html", result=predicted)
Code example #31
import json

import pandas as pd
from flask import Response, request

# decode() and predict() are project-local helpers.
def api():
    print('in')

    postData = request.json
    imgData = postData['imgData']

    #print(imgData)

    dbs = {}

    label, lblname = predict(decode(imgData))
    print(lblname)
    df = pd.read_csv('final.csv', index_col=0)

    indexValues = df.columns.values
    data = df.loc[label]
    dbNames = list(set(indexValues) - set(['label', 'name']))

    for dbName in dbNames:

        db = {}
        ref = pd.read_csv('database-new/' + dbName + '.csv')
        reqId = data[dbName]
        reqData = ref.loc[ref['id'] == reqId]
        #print(reqData)

        for column in reqData:
            if column != 'image':
                value = reqData[column].tolist()
                if value != []:
                    value = value[0]
                    #print(type(value))
                    if isinstance(value, long):  # Python 2 `long`; see the shim after this example
                        value = str(value).strip('L')
                    #print(column, value)
                    db[column] = value
        if db:
            dbs[dbName] = db

    print(dbs)

    return Response(response=json.dumps(dbs),
                    status=200,
                    mimetype="application/json")