Code Example #1
def predict_test_averaging(t):
    preds = np.zeros((61191, 17))
    # imgs = test_dataloader.dataset.images.copy()
    # iterate over models
    for index, model in enumerate(models):
        name = str(model).split()[1]
        net = nn.DataParallel(model().cuda())
        net.load_state_dict(
            torch.load(
                '/mnt/home/dunan/Learn/Kaggle/planet_amazon/model/{}.pth'.
                format(name)))
        net.eval()
        # iterate over transformations
        for transformation in transforms:
            # imgs = transformation(imgs)
            test_dataloader.dataset.images = transformation(
                test_dataloader.dataset.images)
            pred = predict(dataloader=test_dataloader, net=net)
            preds = preds + pred

    preds = preds / (len(models) * len(transforms))
    # preds = preds / len(models)
    pred_csv(predictions=preds,
             threshold=t,
             name='transforms-res50-152dense-ensembels')
Code Example #2
def probs(dataloader):
    """
    returns a numpy array of probabilities (n_transforms, n_models, n_imgs, 17)
    use transforms to find the best threshold
    use models to do ensemble method
    """
    n_transforms = len(transforms)
    n_models = len(models)
    n_imgs = dataloader.dataset.num
    imgs = dataloader.dataset.images.copy()
    probabilities = np.empty((n_transforms, n_models, n_imgs, 17))
    for t_idx, transform in enumerate(transforms):
        t_name = str(transform).split()[1]
        dataloader.dataset.images = transform(imgs)
        for m_idx, model in enumerate(models):
            name = str(model).split()[1]
            net = model().cuda()
            name = 'full_data_{}.pth'.format(name)
            net = nn.DataParallel(net)
            net.load_state_dict(
                torch.load(
                    '/mnt/home/dunan/Learn/Kaggle/planet_amazon/model/{}'.
                    format(name)))
            net.eval()
            # predict
            m_predictions = predict(net, dataloader)

            # save
            np.savetxt(
                X=m_predictions,
                fname=
                '/mnt/home/dunan/Learn/Kaggle/planet_amazon/probs/{}_{}.txt'.
                format(t_name, name))
            probabilities[t_idx, m_idx] = m_predictions
    return probabilities
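
As the docstring notes, the returned array can then be reduced to a single ensemble prediction by averaging over the transform and model axes. A minimal sketch of that reduction (the loader name and the 0.2 threshold are illustrative only, not from the original project):

all_probs = probs(test_dataloader)        # shape (n_transforms, n_models, n_imgs, 17)
ensemble = all_probs.mean(axis=(0, 1))    # average over transforms and models -> (n_imgs, 17)
labels = (ensemble > 0.2).astype(int)     # 0.2 is a placeholder threshold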
Code Example #3
def test():
    net = nn.DataParallel(densenet161().cuda())
    net.load_state_dict(torch.load('models/densenet161.pth'))
    net.eval()

    dataset = KgForestDataset(split='test-61191',
                              transform=Compose([
                                  Lambda(lambda x: toTensor(x)),
                                  Normalize(mean=mean, std=std)
                              ]),
                              height=256,
                              width=256,
                              label_csv=None)

    test_loader = DataLoader(dataset,
                             batch_size=512,
                             shuffle=False,
                             pin_memory=True)
    probs = predict(net, test_loader)

    # probs = np.empty((61191, 17))
    # current = 0
    # for batch_idx, (images, im_ids) in enumerate(test_loader):
    #     num = images.size(0)
    #     previous = current
    #     current = previous + num
    #     logits = net(Variable(images.cuda(), volatile=True))
    #     prob = F.sigmoid(logits)
    #     probs[previous:current, :] = prob.data.cpu().numpy()
    #     print('Batch Index ', batch_idx)

    pred_csv(probs, name='densenet161', threshold=BEST_THRESHOLD)
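
The commented-out block above is essentially the loop that predict() wraps. A minimal sketch of such a helper in current PyTorch, where torch.no_grad() replaces the deprecated volatile flag (the signature and the dataset.num attribute are assumptions based on how predict is called in these examples, not the project's actual implementation):

import numpy as np
import torch

def predict(net, dataloader, n_classes=17):
    # Assumed helper: returns an (n_imgs, n_classes) array of sigmoid probabilities.
    net.eval()
    probs = np.empty((dataloader.dataset.num, n_classes))
    current = 0
    with torch.no_grad():  # replaces Variable(..., volatile=True)
        for images, _ in dataloader:
            num = images.size(0)
            logits = net(images.cuda())
            probs[current:current + num] = torch.sigmoid(logits).cpu().numpy()
            current += num
    return probs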
Code Example #4
def predict_test_majority():
    """
    Majority voting method.
    """
    labels = np.empty((len(models), 61191, 17))
    for m_idx, model in enumerate(models):
        name = str(model).split()[1]
        print('predicting model {}'.format(name))
        net = nn.DataParallel(model().cuda())
        net.load_state_dict(
            torch.load(
                '/mnt/home/dunan/Learn/Kaggle/planet_amazon/model/full_data_{}_10xlr.pth'
                .format(name)))
        net.eval()
        preds = np.zeros((61191, 17))
        for t in transforms:
            test_dataloader.dataset.images = t(test_dataloader.dataset.images)
            print(t, name)
            pred = predict(net, dataloader=test_dataloader)
            preds = preds + pred
        # get predictions for the single model
        preds = preds / len(transforms)
        np.savetxt(
            '/mnt/home/dunan/Learn/Kaggle/planet_amazon/submission_probs/full_data_{}_10xlr_224and256.txt'
            .format(name), preds)
        # get labels
        preds = (preds > thresholds[name]).astype(int)
        labels[m_idx] = preds

    # majority voting
    labels = labels.sum(axis=0)
    labels = (labels >= (len(models) // 2)).astype(int)
    pred_csv(predictions=labels,
             name='majority_voting_ensembles_split_data_10xlr_224and256')
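
A small illustration of the voting step above (toy numbers, not from the original project): labels.sum(axis=0) counts per-class votes, and comparing the counts with >= len(models) // 2 is a lenient cut (with three models, a single vote suffices); a strict majority would use > instead of >=.

import numpy as np

votes = np.array([[1, 0, 1],   # model A's labels for three classes
                  [1, 0, 0],   # model B
                  [0, 1, 0]])  # model C
counts = votes.sum(axis=0)                            # [2, 1, 1]
lenient = (counts >= (len(votes) // 2)).astype(int)   # [1, 1, 1] -> at least 1 of 3 votes
strict = (counts > (len(votes) // 2)).astype(int)     # [1, 0, 0] -> at least 2 of 3 votes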
Code Example #5
def pred_test():
    imgs = test_loader.dataset.images
    for idx, t in enumerate(transforms):
        t_name = str(t).split()[1]
        print('[!]Transforms {}'.format(t_name))
        test_loader.dataset.images = t(imgs)
        preds = predict(net, test_loader)
        pred_labels = (preds > thresholds['blender']).astype(int)
        np.savetxt('submission_probs/full_data_{}_blender.txt'.format(t_name), pred_labels)
Code Example #6
def login():
    if request.method == 'POST':
        data = request.form
        species = util.predict(data)
        response = jsonify({'species': species})
        response.headers.add('Access-Control-Allow-Origin', '*')

        return response
Code Example #7
File: model_train.py  Project: pspiagicw/lobe
def main(no_of_label):
    train_data, test_data = get_data()
    model = keras.Sequential([
        keras.layers.Conv2D(filters=128, kernel_size=3, activation="relu"),
        keras.layers.MaxPool2D(pool_size=(2, 2)),
        keras.layers.Conv2D(filters=64, kernel_size=3, activation="relu"),
        keras.layers.MaxPool2D(pool_size=(2, 2)),
        keras.layers.Flatten(),
        keras.layers.Dense(512, activation="relu"),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(no_of_label, activation="softmax")  # softmax (not sigmoid) pairs with sparse_categorical_crossentropy
    ])
    model.compile(optimizer="adam",
                  loss="sparse_categorical_crossentropy",
                  metrics=['accuracy'])
    model.fit(train_data, validation_data=test_data, epochs=20)
    predict(model)
Code Example #8
File: app.py  Project: NullOsama/Dog-classification
def uploader():
    # save the uploaded image and apply transformation, then predict.
    if request.method == 'POST':
        profile = request.files['file']
        save_at = os.path.join(uploads_dir, profile.filename)
        profile.save(save_at)
        pred = predict(model, save_at)
        return render_template('upload.html',
                               img_src=profile.filename,
                               prediction=pred)
Code Example #9
def pred_valid():
    # preds = np.empty((len(transforms), valid_loader.dataset.images.shape[0], 17))
    imgs = valid_loader.dataset.images.copy()
    for t in transforms:
        name = 'blender'
        t_name = str(t).split()[1]
        print('[!]Transform: {}'.format(t_name))
        valid_loader.dataset.images = t(imgs)
        pred = predict(net, valid_loader)
        np.savetxt('probs/{}_{}.txt'.format(t_name, name), pred)
Code Example #10
def predict_vua_allpos(RNNseq_model):
    preds = {}
    for (embed, pos_seq, txt_sent_id) in embedded_test_vua:
        ex_data = TextDataset([embed], [pos_seq], [[0 for pos in pos_seq]])
        ex_dataloader = DataLoader(dataset=ex_data,
                                   batch_size=1,
                                   collate_fn=TextDataset.collate_fn)
        pred = predict(ex_dataloader, RNNseq_model, using_GPU)
        preds[txt_sent_id] = pred[0][0]
    return preds
Code Example #11
def predict_vua(rnn_clf):
    preds = {}
    for (embed, txt_sent_id) in embedded_test_vua:
        ex_data = TextDataset([embed], [0])
        ex_dataloader = DataLoader(dataset=ex_data,
                                   batch_size=1,
                                   collate_fn=TextDataset.collate_fn)
        pred = predict(ex_dataloader, rnn_clf, using_GPU)
        preds[txt_sent_id] = pred.item()
    return preds
Code Example #12
File: calc_week.py  Project: v/league_predict
def compute_week_no_meta(week):
    actual_champs = free_champ_data[week]

    predictions = predict(week, None)

    ten = matches(predictions[:10], actual_champs)
    twenty = matches(predictions[:20], actual_champs)
    thirty = matches(predictions[:30], actual_champs)

    return ten, twenty, thirty
Code Example #13
File: predict.py  Project: sc2646/udacity-dsnd
def main():
    parser = argparse.ArgumentParser(description='Predict a file.')
    parser.add_argument('input_img',
                        type=str,
                        help='Path of the input image',
                        default="./flowers/test/2/image_05100.jpg")
    parser.add_argument('checkpoint',
                        type=str,
                        help='Path of the checkpoint',
                        default="./checkpoint.pth")
    parser.add_argument('--top_k', help='Top k', type=int, default=5)
    parser.add_argument('--gpu',
                        action='store_true',
                        help='Use GPU for inference if GPU is available')
    parser.add_argument('--category_names',
                        action="store",
                        default='cat_to_name.json')

    args, _ = parser.parse_known_args()

    input_img = args.input_img
    checkpoint = args.checkpoint

    category_names = 'cat_to_name.json'
    if args.category_names:
        category_names = args.category_names

    top_k = 5
    if args.top_k:
        top_k = args.top_k

    cuda = False
    if args.gpu:
        if torch.cuda.is_available():
            cuda = True
        else:
            print("GPU flag was set but no GPU is available in this machine.")

    loaded_model = util.load_checkpoint(checkpoint, cuda)

    with open(category_names, 'r') as json_file:  # use the parsed --category_names argument
        cat_to_name = json.load(json_file)

    probabilities, classes = util.predict(input_img, loaded_model, top_k)

    labels = [cat_to_name[str(int(index) + 1)] for index in classes]
    probability = np.array(probabilities)

    i = 0
    while i < top_k:
        print("{} with a probability of {:.2f}%".format(
            labels[i], probability[i] * 100))
        i += 1

    print("Prediction Done.")
Code Example #14
    def test(self, X, y):

        prediction = predict(self.all_theta, X)

        acc = accuracy_score(np.argmax(y,axis=1), prediction)
        # prec = precision(y, prediction)
        # recall = recall(y, prediction)

        print('accuracy : %f%%' % (acc * 100))

        self.acc.append(acc)
Code Example #15
def predict_test(t):
    preds = np.zeros((61191, 17))
    for index, model in enumerate(models):
        name = str(model).split()[1]
        net = nn.DataParallel(model().cuda())
        net.eval()
        net.load_state_dict(torch.load('models/{}.pth'.format(name)))
        pred = predict(dataloader=test_dataloader, net=net)
        preds = preds + pred

    preds = preds / len(models)
    pred_csv(predictions=preds, threshold=t, name='ensembles')
Code Example #16
def predict():
    global message
    if request.method == "POST":
        a = request.form['PAY_1']
        b = request.form['LIMIT_BAL']
        c = request.form['PAY_AMT1']
        d = request.form['PAY_AMT2']
        e = request.form['PAY_AMT3']

        message = util.predict(a, b, c, d, e)

    return render_template("predict.html", message=message)
Code Example #17
File: random_forest.py  Project: brian50208lee/ML2017
def main():
	#f = open('result.txt', 'a')
	X, test = util.load_data()
	Y = util.load_target()
	#util.save_data(X, Y, test)
	#scaler = preprocessing.MinMaxScaler()
	#X = scaler.fit_transform(X)

	x_train, x_val, y_train, y_val = train_test_split(X, Y, test_size=0.1)

	clf = RandomForestClassifier(n_estimators= 200)

	clf.fit(x_train, y_train)

	pred = clf.predict(x_val)

	print(accuracy_score(y_val, pred))

	pred =  clf.predict(test)

	util.predict(pred)
Code Example #18
def home():
    if request.method == "POST":
        a = request.form['spread']
        b = request.form['mdvp_apq']
        c = request.form['ppe']
        d = request.form['mdvp_shimmer']
        e = request.form['mdvp_shimmerdb']

        message = util.predict(a, b, c, d, e)

        return render_template("predict.html", message=message)
    return render_template("index.html")
Code Example #19
File: inference.py  Project: freeskyES/help-Challenge
def inference():
    test_model = pickle.load(open(os.path.join(VOL_DIR, 'model.dat'), 'rb'))
    person_table, condition_occurrence_table, outcome_cohort_table, measurement_table = util.load_data_set(
        TEST_DIR)
    measurement_table = util.preprocess_measurement(measurement_table)
    y_pred, y_proba = util.predict(test_model, person_table,
                                   condition_occurrence_table,
                                   measurement_table, outcome_cohort_table)
    predict_result = pd.DataFrame({
        'LABEL': y_pred,
        'LABEL_PROBABILITY': y_proba
    })
    predict_result.to_csv(os.path.join(OUTPUT_DIR, 'output.csv'), index=False)
Code Example #20
File: emg_core.py  Project: ShengqiWang/EMG_LAB
    def predict(self):
        path = "{}/model-{}.model".format(
            PATH_PREFIX, self.person_id)
        with open(path, 'rb') as f:
            model_dict = pickle.load(f)
        self.scaler = model_dict["scaler"]
        self.label_encoder = model_dict["label_enc"]
        self.clf = model_dict["clf"]


        self.data_tmp = np.ones(shape=(1, self.num_feat))

        # Init transmission
        ctl_data = "START\r\n\r\n"
        self.CTL_CLI.send(ctl_data.encode())

        while not self.TERMINATED:
            # receive one observation
            raw_emg_data = self.receive()

            # decode raw data into floats
            emg_data = np.zeros(shape=(int(self.sample_time * self.sample_rate), len(self.sensors)))
            for (sample_index, _), chunk_data in np.ndenumerate(raw_emg_data):
                for id_index, sensor_id in enumerate(self.sensors):
                    raw_tmp = chunk_data[(sensor_id - 1) * 4 : sensor_id * 4]
                    decoded_data = struct.unpack("f", raw_tmp)[0]
                    emg_data[sample_index][id_index] = decoded_data

            # fake data
            #sleep(self.sample_time)
            #emg_data = np.random.rand(int(self.sample_time * self.sample_rate), len(self.sensors))

            #for sensor_id, sensor in enumerate(self.sensors):
            #    self.signal[:, sensor - 1] = emg_data[:, sensor_id]

            # calculate feature
            data_tmp = util.feat_gen(emg_data, waveLength=1, mav=1, ar4c=1)
            data_tmp = self.scaler.transform(data_tmp)

            # classifying
            prediction = util.predict(data_tmp, self.clf, model=self.model)
            # self.y_pre = (
            #     self.label_encoder.inverse_transform([int(prediction[0])])[0]
            #     + str(prediction[1]),
            #     int(prediction[0]),
            #     prediction[1],
            # ) 
            self.y_pre = [
                self.label_encoder.inverse_transform([int(prediction[0])])[0],
                str(round(prediction[1], 4)),
            ]
            self.CHANGE_FLAG = True
Code Example #21
def call_predict(image_path, checkpoint, json_file, topk, gpu):

    print ('image_path: {},\t checkpoint:{},\t json_file: {},\t topk: {},\t gpu: {}\n'.format(image_path, checkpoint, json_file, topk, gpu))

    print ('Predicting...')
    probs, classes, names = predict(image_path, checkpoint, json_file, topk, gpu)
    print ('Predicting Done.\n')

    results = []
    if len(names):
        results = list(zip(probs, names))
    else:
        results = list(zip(probs, classes))

    max_guess = max(results,key=lambda item:item[0])

    print ('Max Guess => Prob.: {}, Class: {}\n'.format(max_guess[0], max_guess[1]))
    print ('top{} Results:'.format(topk))
    print (results)
Code Example #22
File: calc_week.py  Project: v/league_predict
def compute_week(week):
    actual_champs = free_champ_data[week]

    top_10 = []
    top_20 = []
    top_30 = []

    for meta in METAS:
        predictions = predict(week, meta)

        top_10 += predictions[:2]
        top_20 += predictions[:4]
        top_30 += predictions[:6]


    ten = matches(top_10, actual_champs)
    twenty = matches(top_20, actual_champs)
    thirty = matches(top_30, actual_champs)

    return ten, twenty, thirty
Code Example #23
def predict_test_averaging(t):
    preds = np.zeros((61191, 17))
    # imgs = test_dataloader.dataset.images.copy()
    # iterate over models
    for index, model in enumerate(models):
        name = str(model).split()[1]
        net = nn.DataParallel(model().cuda())
        net.load_state_dict(torch.load('models/{}.pth'.format(name)))
        net.eval()
        # iterate over transformations
        for transformation in transforms:
            # imgs = transformation(imgs)
            test_dataloader.dataset.images = transformation(
                test_dataloader.dataset.images)
            pred = predict(dataloader=test_dataloader, net=net)
            preds = preds + pred

    preds = preds / (len(models) * len(transforms))
    # preds = preds / len(models)
    np.savetxt('submission_probs/fpn_152', preds)
    pred_csv(predictions=preds, threshold=t, name='fpn-152')
Code Example #24
File: predict.py  Project: leoliu2008/Udacity
def main():

    model = util.load_checkpoint(pa.load_dir)

    with open(pa.category_name, 'r') as f:
        cat_to_name = json.load(f)


#     print(cat_to_name.get)

#     print(len(cat_to_name))

    top_p_list, top_flowers = util.predict(image_path, pa.load_dir,
                                           cat_to_name, pa.top_k)
    #     labels = [cat_to_name[str(index+1)] for index in np.naray(top_p_list[1][0])

    #     return print(top_p_list[0])
    i = 0
    while i < pa.top_k:
        print('{} with a probability of {}'.format(top_flowers[i],
                                                   top_p_list[i]))
        i += 1
Code Example #25
def predict_test(t):
    preds = np.zeros((61191, 17))
    # imgs = test_dataloader.dataset.images.copy()
    # iterate over models
    for index, model in enumerate(models):
        name = str(model).split()[1]
        net = nn.DataParallel(model().cuda())
        net.load_state_dict(torch.load('models/{}.pth'.format(name)))
        net.eval()
        # iterate over transformations
        for transformation in transforms:
            # imgs = transformation(imgs)
            test_dataloader.dataset.images = transformation(
                test_dataloader.dataset.images)
            pred = predict(dataloader=test_dataloader, net=net)
            preds = preds + pred

    preds = preds / (len(models) * len(transforms))
    # preds = preds / len(models)
    pred_csv(predictions=preds,
             threshold=t,
             name='transforms-resnet152_densenet161_densent169-ensembels')
Code Example #26
def process_frame(frame):
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_frame = frame[:, :, ::-1]

    # Find all the faces and face encodings in the current frame of video
    face_locations = face_recognition.face_locations(rgb_frame)
    face_encodings = face_recognition.face_encodings(
        rgb_frame, face_locations, model="large")

    face_names = []
    predictions = predict(face_encodings, library)
    for prediction in predictions:
        face_names.append(prediction['name'])

    # Label the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        if not name:
            continue

        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        frame = draw_text(frame, name, left + 10, bottom + 10)
    return frame, face_locations, face_names
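
A short usage sketch (assumed, not from the original project): feeding webcam frames through process_frame with OpenCV, given that the globals it relies on (predict, library, draw_text) are already set up.

import cv2

cap = cv2.VideoCapture(0)  # default webcam
while True:
    ok, frame = cap.read()
    if not ok:
        break
    frame, face_locations, face_names = process_frame(frame)
    cv2.imshow('faces', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()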
Code Example #27
def make_test_labels():
    # labels = np.empty((len(models), 61191, 17))
    for m_idx, model in enumerate(models):
        name = str(model).split()[1]
        threshold = thresholds[name]
        print('Model {}'.format(name))
        print('threshold is {}'.format(threshold))
        net = nn.DataParallel(model().cuda())
        net.load_state_dict(torch.load('models/full_data_{}.pth'.format(name)))
        net.eval()
        preds = np.zeros((61191, 17))
        for t in transforms:
            test_dataloader.dataset.images = t(test_dataloader.dataset.images)
            print(t, name)
            p = predict(net, dataloader=test_dataloader)
            preds = preds + (p > threshold).astype(int)
            # get predictions for the single model
            # preds = preds/len(transforms)
            # np.savetxt('submission_probs/full_data_{}.txt'.format(name), preds)
            # get labels
            # preds = (preds > thresholds[m_idx]).astype(int)
        preds = (preds >= (len(transforms) // 2)).astype(int)
        np.savetxt('submission_preds/full_data_{}.txt'.format(name), preds)
Code Example #28
def clf():
    request_json = request.get_json()

    if request_json.get("image"):
        try:
            results, confidence_score = predict(model,
                                                request_json.get("image"))
        except Exception as error:
            return jsonify({
                "description": repr(error),
                "error": "Bad Request",
                "status_code": "401"
            })
        return jsonify({
            "results": results,
            "confidence": confidence_score,
            "status_code": "200"
        })
    else:
        return jsonify({
            "description": "Input Image is Missing",
            "error": "Bad Request",
            "status_code": "401"
        })
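
A minimal client-side sketch of calling such an endpoint (the URL, route, and base64 payload encoding are assumptions; the actual app may expect a different image format):

import base64
import requests

with open('sample.jpg', 'rb') as f:
    payload = {"image": base64.b64encode(f.read()).decode("utf-8")}

resp = requests.post("http://localhost:5000/clf", json=payload)  # hypothetical URL/route
print(resp.json())  # e.g. {"results": ..., "confidence": ..., "status_code": "200"}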
Code Example #29
File: __main__.py  Project: fcbruce/machine-learning
#
#
# Author : fcbruce <*****@*****.**>
#
# Time : Sat 27 Feb 2016 08:04:07 PM CST
#
#

from trainer import Trainer
from util import predict
from sklearn.metrics import accuracy_score
import numpy as np

trainer = Trainer("../module/module.json")
all_theta = trainer.train()

prediction = predict(all_theta, trainer.X_test)

acc = accuracy_score(np.argmax(trainer.y_test, axis=1), prediction)

print "\ntest..."
print "accuracy : %f%%" % (acc * 100)
Code Example #30
File: facial.py  Project: xordex28/facialProyect
def reconocimiento(search, templete, datos):
    if (len(templete) > 0 and len(templete[0]) > 0):
        templete = [float(i) for i in templete[0]]
        templete = np.asarray(templete)
        searchs = np.asarray(search['templates'])
        matches = face_recognition.compare_faces(
            searchs, templete, tolerance=0.40)
        face_distances = face_recognition.face_distance(searchs, templete)
        count = 0
        prueba, prueba1 = face_multiple(matches, count)

        # for pos in prueba1:
        #     print('reconocido', prueba, pos, search['doc_ids'][pos], face_distances[pos])

        documentos = []
        promedios = []
        for doc, data in groupby(list(search['doc_ids'])):
            m = [face_distances[i] for i, x in enumerate(
                list(search['doc_ids'])) if x == str(doc)]
            if len(m) > 0:
                documentos.append(doc)
                promedios.append((sum(m)/len(m)))
        first_match_index = np.where(
            min(list(face_distances)) == face_distances)[0][0]
        doc_id = search['doc_ids'][first_match_index]
        distance = face_distances[first_match_index]
        promedios_min = []
        doc_id_min_prom = documentos[promedios.index(
            min(promedios))] if documentos else 'N/A'
        distance_min_prom = min(promedios) if promedios else 'N/A'
        print([face_distances[i] for i, x in enumerate(
            list(search['doc_ids'])) if x == str(doc_id_min_prom)])
        if os.path.exists('modeloknn.clf'):
            predictions, distance_knn = predict(
                datos['url'], model_path="modeloknn.clf")
            promedio = (distance+distance_knn)/2
            for name, (top, right, bottom, left) in predictions:
                if str(name) == str(doc_id_min_prom) and str(name) == doc_id and promedio < 0.43:
                    datos["doc_id"] = str(name)
                    if promedio < 0.25:
                        resp, template_recognition_lentgh = insertPerson(
                            getEncode(datos['url']),
                            str(name),
                            None,
                            None,
                            datos['cliente']
                        )
                        ruta_foto = datos['url']
                        new_nombre = ruta_foto.replace(
                            '.jpg', '-'+str(template_recognition_lentgh)+'.jpg')
                        os.rename(ruta_foto, new_nombre)
                        new_nombre = moveToFotos(new_nombre, str(name))
                        datos['url'] = copyToReconocidos(
                            new_nombre, datos['cliente'])
                    else:
                        datos['url'] = moveToReconocidos(
                            datos['url'], datos['cliente'])
                    print('Reconocido', datos['url'], doc_id, distance, name, distance_knn, promedio,
                          ((distance+distance_knn) * promedio), ' - ', doc_id_min_prom, distance_min_prom)
                    #insertarasistencia(datos['dispositivo'], str(name), datos)
                    return {
                        'descripcion': 'reconocido por knn', 'url': datos['url'], 'cliente': datos['cliente'],
                        'distance': distance, 'distance_knn': distance_knn, 'doc_id_knn': str(name),
                        'doc_id': doc_id, 'distance_min_prom':  distance_min_prom, 'doc_id_min_prom': doc_id_min_prom
                    }
                else:
                    datos['url'] = moveStandBy(datos['url'], datos['cliente'])
                    print('Standby', datos['url'], doc_id, distance, name, distance_knn, promedio,
                          ((distance+distance_knn) * promedio), ' - ', doc_id_min_prom, distance_min_prom)
                    return {
                        'descripcion': 'movido a standby', 'url': datos['url'], 'cliente': datos['cliente'],
                        'distance': distance, 'distance_knn': distance_knn, 'doc_id': doc_id,
                        'doc_id_knn': str(name)
                    }
    else:
        datos['url'] = moveToSinRostro(datos['url'], datos['cliente'])
        print('movido a Sin Rostro', datos['url'], datos['cliente'])
        return {'descripcion': 'movido a sin Rostro', 'url': datos['url'], 'cliente': datos['cliente']}
Code Example #31
    util.createSubScatterPlot(util.stdFeature(Xe_norm[:, i]), y, f'Feature {i}', 'Y', 2, 3, i)

plt.xlim(-3, 3)
plt.show()

gpu = np.array([2432, 1607, 1683, 8, 8, 256])
gpu_norm = util.standardize(gpu, feature_mean, feature_std)
gpu_norm_e = np.array([1, gpu_norm[0], gpu_norm[1], gpu_norm[2], gpu_norm[3], gpu_norm[4], gpu_norm[5]])

gpu = np.array([1, 2432, 1607, 1683, 8, 8, 256])
beta = util.calcBeta(Xe, y)
print("Benchmark using normal eq: ", util.normalEq(Xe, y, gpu))
print("Cost function: ", util.cost(Xe, y, beta))
# 12.3964
beta2 = util.calcBeta(Xe_norm, y)
print("Cost function normalized: ", util.cost(Xe_norm, y, beta2))
print("Benchmark on normalized data: ", util.normalEq(Xe_norm, y, gpu_norm_e))


# Implement vectorized version of gradient descent
iterations = 10000
alpha = 0.02
start = np.array([0, 0, 0, 0, 0, 0, 0])

beta = util.minimizeBeta(iterations, alpha, start, Xe_norm, y)
print("GD final cost: ", util.cost(Xe_norm, y, beta))
print(f"Parameters: Alpha={alpha}, Iterations={iterations}")
# 12.50950 after 35 mill iterations
# .0091% of previous cost
print("Benchmark(normalized gd): ", util.predict(gpu_norm_e, beta))
Code Example #32
File: test_split.py  Project: haneul/mcdnn
import caffe
import numpy as np
from util import predict

caffe_dir = "../caffe"
MODEL_FILE = caffe_dir + "/models/bvlc_reference_caffenet/deploy.prototxt"
PRETRAINED = caffe_dir + "/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel"
IMAGE_FILE = "../cat.png"
with open("synset_words.txt") as f:
    words = f.readlines()
words = map(lambda x: x.strip(), words)

net = caffe.Classifier(MODEL_FILE, PRETRAINED,
                   mean=np.load(caffe_dir + '/python/caffe/imagenet/ilsvrc_2012_mean.npy'),
                   channel_swap=(2,1,0),
                   raw_scale=255,
                   image_dims=(256, 256)) 
net.set_phase_test()
net.set_mode_gpu()
input_image = caffe.io.load_image(IMAGE_FILE)
#print(list(net._layer_names))
predict([input_image], (256, 256), net.crop_dims, net) 
#r = net.forward_all([input_image])

Code Example #33
File: predict.py  Project: ricamos/ImageClassifier
parser.add_argument('--gpu', action='store_true', help='use gpu to infer classes')
parser.add_argument('--topk', action = 'store', dest = 'topk', type=int, default = 5, required = False, help = 'Return top K most likely classes')
parser.add_argument('--category_names', action='store', help='Label mapping file')

arguments = parser.parse_args()

try:
    # Use GPU if it's available
    #device = util.choose_device(arguments.gpu)
    
    #loads a checkpoint and rebuilds the model
    model = util.load_checkpoint(arguments.checkpoint_file)
    model.eval()
    
    #Image Preprocessing
    img_file = random.choice(os.listdir(arguments.img_path))
    image_path = arguments.img_path+img_file
    img = util.process_image(image_path)
    
    # Class Prediction
    probs, classes = util.predict(image_path, model, arguments.gpu, arguments.topk)
 
    # Sanity Checking
    cat_to_name = util.cat_to_name(classes, model, arguments.category_names)
    
    for i in range(len(cat_to_name)):
        print(f"class = {cat_to_name[i]} prob = {probs.data[0][i]:.3f}")
    #util.view_classify(image_path, probs, classes, cat_to_name)   
except Exception as e:
    logging.exception("Exception occurred")
    
Code Example #34
print("\nParameters:")
for attr, value in sorted(args.__dict__.items()):
    print("\t{}={}".format(attr.upper(), value))

# model
cnn = model.CNN_Text(args)
if args.cuda:
    torch.cuda.set_device(args.device)
    cnn = cnn.cuda()

# train or predict
if args.predict is not None:
    if args.date != '':
        util.daily_predict(cnn, args)
        output = './input/news/' + args.date[:4] + '/news_' + args.date + '.csv'
        os.system('mv ' + output + '_bak ' + output)
    else:
        mymodels, word2idx, stopWords = util.predictor_preprocess(cnn, args)
        print(util.predict(args.predict, mymodels, word2idx, stopWords, args))
elif args.eval is not False:
    mymodels, word2idx, stopWords = util.predictor_preprocess(cnn, args)
    util.bma_eval(X_test, y_test, mymodels, 'Testing   ', args)
else:
    print()
    try:
        util.train(X_train, y_train, X_valid, y_valid, X_test, y_test, cnn,
                   args)
    except KeyboardInterrupt:
        print('\n' + '-' * 89)
        print('Exiting from training early')
Code Example #35
import pickle, json, random
import genData, util

with open('./yelp/user_rwr_matrix.pickle', 'rb') as f:  # binary mode required for pickle.load
    matrix = pickle.load(f)

util.predict(matrix)