def main(path):
    start = time.time()
    os.system('apktool -f d {}'.format(path))
    disassemble_location = './{}/'.format(path[:-4])
    print('Disassembling of the APK Completed in {} seconds'.format(
        time.time() - start))

    start = time.time()
    call_graph = gen_call_graph(disassemble_location)
    print('Generation of Call Graph Completed in {} seconds'.format(
        time.time() - start))

    start = time.time()
    sensitive_subgraph = gen_sensitive_subgraph(call_graph)
    print('Generation of Sensitive Subgraph Completed in {} seconds'.format(
        time.time() - start))

    if sensitive_subgraph:
        subgraph_feature_list = extract_subgraph_features(sensitive_subgraph)
    else:
        subgraph_feature_list = [0.0, 0.0]

    permission_feature_list = extract_perm_features(disassemble_location)
    combined_features = subgraph_feature_list + permission_feature_list

    predict(combined_features)
    shutil.rmtree(disassemble_location)
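# A minimal entry point for the pipeline above (a sketch; the script name is
# illustrative, and time, os, and shutil are assumed to be imported at module
# level as the function requires).
if __name__ == '__main__':
    import sys
    if len(sys.argv) != 2 or not sys.argv[1].endswith('.apk'):
        sys.exit('usage: python analyze_apk.py <app.apk>')
    main(sys.argv[1])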
def main():
    parser = argparse.ArgumentParser(description="Classify the instrument in a music sample")
    parser.add_argument("-i", dest="filename", required=True, help="input file in WAV format")
    args = parser.parse_args()
    filename = args.filename
    check_format(filename)

    model_instrument = load_model("29_svm_instrument")
    model_family = load_model("29_svm_family")
    scaler = load_model("scaler_instrument")
    print("Family: %s" % predict(model_family, filename, scaler))
    print("Instrument: %s" % predict(model_instrument, filename, scaler))
def test_classifier_no_estimate():
    clf = classifier.make_classifier(use_relaxation_time_estimate=False)
    assert clf is not None

    # Should raise since dragoon_data lacks Half-Mass Relaxation Time
    with pytest.raises(KeyError):
        predictions = classifier.predict(clf, dragoon_data)

    # Let's add some fake values and make sure it works
    dragoon_copy = dragoon_data.copy()
    dragoon_copy['Half-Mass Relaxation Time'] = 10000

    predictions = classifier.predict(clf, dragoon_copy)
    assert len(predictions) == 3
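# A hedged sketch of the `dragoon_data` fixture the tests in this file assume:
# a pandas DataFrame with three rows, since the test checks len(predictions) == 3.
# The column names below are illustrative placeholders, not the real schema.
import pandas as pd

dragoon_data = pd.DataFrame({
    'Mass': [1.2e5, 3.4e5, 8.1e4],
    'Core Radius': [0.8, 1.1, 0.5],
})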
Example #4
def main():
    args = parse_inputs(predict=True)

    # Load the model and process image
    # Define whether to use GPU or CPU and move model/img
    model = load_checkpoint(args.checkpoint, args.gpu)
    img = process_image(args.path)

    # Get prediction
    probs, classes = predict(img, model, args.top_k, args.gpu)

    # Load file with category names and classify image
    # Results are printed showing the category names and
    # probabilities
    gap = 40
    precision = 3
    with open(args.category_names, "r") as file:
        print("---- RESULTS ----")
        print("Flower name{}Prob(%)".format((gap - 11) * " "))
        print("-" * (gap + 8))

        flower_dict = json.load(file)
        if classes.ndim < 1:
            name = flower_dict[str(classes)]
            prob = str(round(probs * 100, precision))
            space = "." * (gap - len(name))
            print("{}{}{}%".format(name, space, prob))
        else:
            for idx, val in enumerate(classes.tolist()):
                name = flower_dict[str(val)]
                prob = str(round(probs[idx] * 100, precision))
                space = "." * (gap - len(name))
                print("{}{}{}%".format(name, space, prob))
        print("-" * (gap + 8), end='\n\n')
Example #5
def service():
    if request.method == 'POST':
        file = request.files['file']
        file.save('image_test.jpg')

        # Car model classification
        brand, model, veh_type = predict('image_test.jpg')

        #Car plate detection
        detect('image_test.jpg')

        #Car plate recognition
        text, prob = recognize('X000XX000.jpg')
        response = {
            "brand": brand,
            "model": model,
            "probability": prob,
            "veh_type": veh_type,
            "coord": "[(398,292),(573,360)]",
            "id": "0001",
            "plate": text
        }
        response = json.dumps(response, ensure_ascii=False)

        return Response(response=response,
                        status=200,
                        mimetype="application/json")
    return render_template("service.html")
Example #6
def func_recommend():
    songs = {}
    while len(songs) < 10:
        for key in request.args:
            song = json.loads(key)

        songs = getSongs()
        songs['user'] = str(song.pop('user'))
        songs['playlists'] = int(song.pop('playlists', '0'))

        playlists = list(song.items())[0][1]['playlists']

        prediction = predict(songs)
        for key in prediction:
            count = 0
            for i in range(len(playlists)):
                if prediction[key][i] == playlists[i] == 1:
                    count += 1
            if count > 0:
                songs[key]['playlists'] = prediction[key]
            else:
                songs.pop(key)
        print(len(songs))
    return jsonify(songs)
def tag_articles():

    print "- read NE tagged words csv"
    #1. prepare data
    article_df = pd.read_json("./files/NE_words.json")
    article_df = article_df[article_df["sentence"] <=
                            2]  #just for testing purposes

    #2. extract features
    feature_df, words_df = extract_classif_features(article_df)
    #3. predict
    preds = classifier.predict(feature_df)

    #print words_df.head(40)#just for testing purposes
    print "- saving result to json"
    words_df.loc[:, "iob"] = preds

    #read iob labeler
    le_iob = pickle.load(open("./files/iob_tag_encoder.p", "rb"))
    print le_iob.classes_
    # print le_iob.classes_
    words_df.loc[:, "iob"] = le_iob.inverse_transform(words_df.iob)
    words_df.to_json("./files/iob_words.json")

    print words_df

    print "- Done."
Example #8
def main():
    in_args = get_input_args()
    
    # Process the input image and run the model prediction
    image = process_image(Image.open(in_args.image_path))
    top_ps, top_class, class_to_idx = predict(image, in_args.checkpoint,
                                              in_args.top_k, in_args.gpu)
    
    flower_to_name = load_json(in_args.category_names)
    
    # Map the class_to_idx labels to flower names from the given JSON file
    flower_name_dict = {}
    for flower, label in class_to_idx.items():
        flower_name_dict[label] = flower_to_name.get(flower)
    
    # Create NumPy 1D arrays out of the probabilities & class labels
    probs = np.atleast_1d(top_ps.data.cpu().numpy().squeeze())
    classes = np.atleast_1d(top_class.data.cpu().numpy().squeeze())
    
    # Convert the labels to flower names & combine with the probabilities
    # then sort them in descending order by the probabilities
    class_labels = [flower_name_dict[class_label] for class_label in classes]
    results = sorted(zip(class_labels, probs), key=lambda x: x[1], reverse=True)
    
    # Print the prediction results
    print()
    for i, (label, prob) in enumerate(results):
        print(f'{i+1}. {label}: {prob*100:.3f}%')
Example #9
def result(request):
    """
    Get request from the index page
    Depending on the input type it calls functions to retrieve and process data
    It calls the classifier and get predicted values
    Finally, it displays those values or error message
    :param request:request from index page
    :return:the result html page with results or index html page with error
    """

    if 'BlogURL' in request.POST:
        data_id = getIdByUrl(str(request.POST['BlogURL']))
        if data_id[:5] == "error":
            return render(request, 'Blogger_gender_and_age/index.html',
                          {'msg': "Your input is not valid"})
        else:
            data = getDataByBlogId(data_id)
            if data[:5] == "error":
                return render(request, 'Blogger_gender_and_age/index.html',
                              {'msg': "Your input is not valid"})

    elif 'BlogID' in request.POST:
        data = getDataByBlogId(str(request.POST['BlogID']))
        if data[:5] == "error":
            return render(request, 'Blogger_gender_and_age/index.html',
                          {'msg': "Your input is not valid"})

    elif 'rawText' in request.POST:
        data = str(request.POST['rawText'])
    else:
        # No recognised input was posted; return to the index page with an error.
        return render(request, 'Blogger_gender_and_age/index.html',
                      {'msg': "Your input is not valid"})

    pred_gender, pred_age = predict(data)
    print(pred_gender, pred_age)
    return render(request, 'Blogger_gender_and_age/result.html', {
        'pred_age': pred_age,
        'pred_gender': pred_gender
    })
Example #10
def service():
    if request.method == 'POST':
        file = request.files['file']
        file.save('image_test.jpg')

        # Car model classification
        # format: LADA_PRIORA_B
        brand, model, veh_type = predict('image_test.jpg')

        # Car plate detection
        plate_image = detect('image_test.jpg')

        # Car plate recognition
        car_plate = recognize(plate_image)

        response = {
            "brand": brand,
            "model": model,
            "probability": "72.5",
            "veh_type": veh_type,
            "coord": "[(398,292),(573,360)]",
            "id": "0001",
            "plate": "x000xxx111"
        }
        response = json.dumps(response)

        return Response(response=response,
                        status=200,
                        mimetype="application/json")
    return render_template("service.html")
def test_make_classifier():
    clf = classifier.make_classifier()
    assert clf is not None

    predictions = classifier.predict(clf, dragoon_data)
    assert predictions[0] == True
    assert predictions[1] == True
def test_classifier_with_mass_fallback():
    clf = classifier.make_classifier(fallback_enabled=True)
    assert clf is not None

    predictions = classifier.predict(clf, dragoon_data)
    assert predictions[0] == True
    assert predictions[1] == True
Example #13
def get_dev_accuracy(classifier,
                     dev_size=1000,
                     randSeed=17,
                     image_path='./mnist/train-images-idx3-ubyte',
                     label_path='./mnist/train-labels-idx1-ubyte'):
    random.seed(randSeed)
    train_images, train_labels = loadlocal_mnist(images_path=image_path,
                                                 labels_path=label_path)

    sortedImages = [[] for _ in range(10)]
    for i in range(len(train_labels)):
        sortedImages[train_labels[i]].append(train_images[i])

    for images in sortedImages:
        random.shuffle(images)

    test_images = []
    test_labels = []
    for i in range(10):
        test_images += sortedImages[i][-dev_size:]
        test_labels += [i for j in range(dev_size)]

    test = (torch.tensor(test_images) - 128.) / 128
    test_labels = torch.tensor(test_labels)
    predictions = classifier.predict(test)
    predictions = predictions.type(torch.uint8)
    return torch.mean(torch.eq(predictions, test_labels).float()).item()
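# Hedged usage of get_dev_accuracy() above; assumes the MNIST idx files sit at
# the default paths and `clf` exposes a predict(tensor) -> tensor method.
# print('dev accuracy:', get_dev_accuracy(clf, dev_size=1000))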
Example #14
def classify(audioFile):
    win_size = 0.04
    step = 0.01
    F = []
    # print('reading the audio file....')
    # time.sleep(2)
    data = wave.open(audioFile, 'rb')
    rate = data.getframerate()
    sig = np.frombuffer(data.readframes(data.getnframes()), dtype=np.int16)
    # print('done....')
    # print('extracting features from the audio.....')
    features = Extraction.featureExtraction(sig, rate, win_size * rate,
                                            step * rate)
    tmp = np.concatenate((np.mean(features, axis=1), np.std(features, axis=1)))
    # time.sleep(2)
    # print('done.....')
    F.append(tmp)
    F = np.array(F)
    # print('loading the SVM.....')
    classifier = joblib.load('Metadata/classifier.pkl')
    # time.sleep(2)
    pp = joblib.load('Metadata/transformation_module.pkl')
    db = joblib.load('Metadata/database.pkl')
    # print('scaling the data and applying PCA on it....')
    F = pp.standardize_single(F)
    F = pp.project_on_pc_single(F)
    # time.sleep(2)
    # print('the class that the emotion in the audio file belongs to is:')
    ans = classifier.predict(F)
    db.classes = {v: k for k, v in iter(db.classes.items())}
    return int(ans[0])
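# Hedged usage of classify() above; the WAV path is illustrative, and the
# returned value is the integer class index from the pickled SVM.
# emotion_idx = classify('samples/recording.wav')
# print('predicted emotion class:', emotion_idx)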
Example #15
def get_res(db):

    res = []
    for url in db:
        title, text = db[url]
        if classifier.predict(sn_classifier, text) == "suitable":
            # only if it is suitable
            pos_sents = []
            sents = nltk.sent_tokenize(text)
            for sent in sents:
                # select the sentences with positive sentiments
                feat = classifier.extract_features(sent)
                out = sentilyzer.prob_classify(feat)
                if out.prob('pos') >= I:
                    pos_sents.append(sent)

            if len(pos_sents) == 0:
                # ignore the news if there are no positive sentences
                continue
            pos_text = " ".join(pos_sents)

            summ_text = s.get_summary(pos_text, k=int(K * len(sents)) + 1)

            read_score = classifier.get_readability(summ_text)
            if read_score >= J:
                res.append((title, summ_text))
    return res
Example #16
def test_prediction():
    trained_data = {
        'non_sports_data': {
            'election':
            0.299999999999999988897769753748434595763683319091796875,
            'close': 0.200000000000000011102230246251565404236316680908203125
        },
        'sports_data': {
            'clean': 0.2142857142857142738190390218733227811753749847412109375,
            'game': 0.2142857142857142738190390218733227811753749847412109375,
            'forgettable':
            0.142857142857142849212692681248881854116916656494140625,
            'great': 0.142857142857142849212692681248881854116916656494140625,
            'match': 0.142857142857142849212692681248881854116916656494140625
        }
    }
    prob_of_absence = {
        'sports_data':
        Decimal('0.0714285714285714246063463406244409270584583282470703125'),
        'non_sports_data':
        Decimal('0.1000000000000000055511151231257827021181583404541015625')
    }
    test_dict = {'sports data': {'game': None, 'match': None}}

    assert classifier.predict(
        trained_data, test_dict, demo_train_data, prob_of_absence) == {
            'sports data': {
                'non_sports_data': Decimal('0.004000000000000000666133814773'),
                'sports_data': Decimal('0.01836734693877550748516810296')
            }
        }
Example #17
def gen_profile(vect=None):
    '''
    Given a lifestyle vector, return a Person object with income, expenditure,
    and purchase amounts by category.
    '''
    sav_rating = determine_saving_rating()
    gender = random.choice(['male', 'female'])
    username = gen.generate_name().lower()
    if not vect:
        if gender == 'male':
            vect = gc.generate_test_cases(1)[0]
        else:
            vect = gc.generate_test_cases(1)[1]
    trans_hist = create_transaction_history(vect)
    income, saving = determine_inv_sav(trans_hist, sav_rating)
    email = gen.generate_email(username)
    age = str(random.choice(list(range(16, 81))))
    rent = determine_rent()
    category = str(predict(vect))
    utilities = determine_utility(rent)
    Location = random.choice(
        ['Toronto', 'Calgary', 'Vancouver', 'Montreal', 'Edmonton'])
    password = "******"
    return Person(category, username, gender, password, email, age, income,
                  saving, rent, utilities, trans_hist)
Example #18
def predict():
    """
    Predicts the text label of every value in the given list of unlabeled text.
    """
    try:
        id_token = request.form['id_token']
        uid = verify_id_token(id_token)
    except KeyError:
        return "id_token required", status.HTTP_400_BAD_REQUEST
    except ValueError:
        return "id_token unrecognized", status.HTTP_400_BAD_REQUEST
    except auth.AuthError as exc:
        if exc.code == 'ID_TOKEN_REVOKED':
            return "id_token revoked", status.HTTP_400_BAD_REQUEST
        else:
            return "id_token invalid", status.HTTP_400_BAD_REQUEST
    try:
        unlabeled_text = json.loads(request.form['unlabeled_text'])
    except KeyError:
        return "unlabeled_text required", status.HTTP_400_BAD_REQUEST
    except ValueError:
        return "unlabeled_text unrecognized", status.HTTP_400_BAD_REQUEST
    predicted_labels = classifier.predict(uid, unlabeled_text.values())
    predictions = dict(zip(unlabeled_text.keys(), predicted_labels))
    return json.dumps(predictions), status.HTTP_200_OK
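# A hedged client call for the endpoint above, assuming it is mounted at
# /predict (the route decorator is not shown) and that `requests` is installed.
import json
import requests

payload = {
    'id_token': '<firebase-id-token>',
    'unlabeled_text': json.dumps({'doc1': 'some unlabeled text'}),
}
resp = requests.post('http://localhost:5000/predict', data=payload)
print(resp.status_code, resp.text)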
Example #19
def show_frame():
    _, frame = cap.read()
    frame = cv2.flip(frame, 1)
    cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    filename = "lbpcascade_frontalface.xml"
    clf = cv2.CascadeClassifier(filename)
    faces = clf.detectMultiScale(cv2image)

    for face in faces:
        (x, y, w, h) = face
        if w > 100:
            cv2.rectangle(cv2image, (x, y), (x + w, y + h), (255, 0, 0), 2)
            horizontal_offset = 0.15 * w
            vertical_offset = 0.2 * h
            extracted_face = cv2image[
                int(y + vertical_offset):int(y + h),
                int(x + horizontal_offset):int(x - horizontal_offset + w)]
            new_extracted_face = zoom(
                extracted_face,
                (64. / extracted_face.shape[0], 64. / extracted_face.shape[1]))

            res = svm.predict([new_extracted_face.ravel()])

            if res == 1:
                text.config(text="True")
            else:
                text.config(text="False")

    img = Image.fromarray(cv2image)
    imgtk = ImageTk.PhotoImage(image=img)
    lmain.imgtk = imgtk
    lmain.configure(image=imgtk)
    lmain.after(10, show_frame)
Example #20
def results(data, data1):
    text_to_classify = data
    level = int(data1)
    pred = predict(text_to_classify, level)
    return render_template('results.html',
                           pred=pred,
                           text_to_classify=text_to_classify)
Example #21
def func_predict():
    for key in request.args:
        song = json.loads(key)
    prediction = predict(song)

    for key in prediction:
        song[str(key)]['playlists'] = prediction[key]
    return jsonify(song)
Example #22
def evaluate(train_set, test_set, classifier):
    classifier.train(train_set)
    predictions_list = classifier.predict(test_set)
    acc = get_acc(test_set, predictions_list)
    sens = get_sensivity(test_set, predictions_list)
    spec = get_specifity(test_set, predictions_list)
    prec = get_precision(test_set, predictions_list)
    fmeas = get_fmeas(test_set, predictions_list)
    return acc, sens, spec, prec, fmeas
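# Hedged driver for evaluate() above; the classifier object and the train/test
# split are placeholders, since the source does not show their types.
# acc, sens, spec, prec, fmeas = evaluate(train_set, test_set, clf)
# print('accuracy={:.3f}, F-measure={:.3f}'.format(acc, fmeas))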
def index():
    if request.method == "GET":
        return render_template('index.html')

    elif request.method == "POST":

        f = request.files['file']
        errorMessage = None
        imgUrl = request.form.get('imgUrl')
        if f:
            print('image is uploaded!')
            img = f.read()
            img = Image.open(io.BytesIO(img))
            img = img.convert('RGB')
        elif imgUrl:
            print('image url is given!')
            try:
                img = Image.open(urllib.request.urlopen(imgUrl, timeout=10))
                img = img.convert('RGB')
            except (HTTPError, URLError) as error:
                errorMessage = error
            except timeout:
                errorMessage = 'Oops, timed out while retrieving the image. Try a different image!'
            else:
                print('Image read successfully!')

        else:
            return render_template('index.html',
                                   data={'message': 'No files were given!'})

        if (errorMessage is not None):
            return render_template('index.html',
                                   data={'message': errorMessage})
        else:
            img = img_to_array(img)
            result = C.predict(img)
            # build the image
            img_object = io.BytesIO()
            array_to_img(result['new_image']).save(img_object, 'JPEG')
            # img_object.seek(0)
            img_64 = base64.b64encode(img_object.getvalue())
            img_encoded = u'data:img/jpeg;base64,' + img_64.decode('utf-8')

            # Remove underscores in prediction
            predicted_category = re.sub('_', ' ', result['res'][1])

            data = {
                'result': {
                    'category': predicted_category,
                    'confidence': result['res'][2]
                },
                'image': img_encoded,
                'errors': []
            }

            return render_template('index.html', data=data)
Example #24
def get_test_accuracy(classifier,
                      image_path='./mnist/t10k-images-idx3-ubyte',
                      label_path='./mnist/t10k-labels-idx1-ubyte'):
    test_images, test_labels = loadlocal_mnist(images_path=image_path,
                                               labels_path=label_path)
    test = (torch.tensor(test_images) - 128.) / 128
    test_labels = torch.tensor(test_labels)
    predictions = classifier.predict(test)
    predictions = predictions.type(torch.uint8)
    return torch.mean(torch.eq(predictions, test_labels).float()).item()
Example #25
def getKFoldMetrics(X, Y, k=5):
    metrics = numpy.zeros(5)
    validator = KFold(n_splits=k, shuffle=True, random_state=RANDOM_STATE)
    for trainingIndex, testingIndex in validator.split(X):
        predictions, probabilities = classifier.predict(
            X[trainingIndex], Y[trainingIndex], X[testingIndex])
        metrics += classifier.getMetrics(Y[testingIndex], predictions,
                                         probabilities[:, 1])

    metrics /= k  # average results from each fold
    return metrics
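# Hedged driver for getKFoldMetrics() above, on synthetic data; the five
# returned values are whatever classifier.getMetrics computes for each fold.
import numpy

X_demo = numpy.random.rand(100, 4)
Y_demo = numpy.random.randint(0, 2, size=100)
print(getKFoldMetrics(X_demo, Y_demo, k=5))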
def generateNotes(notesMdls, velMdls, timeMdls, length, initNotes):

    newNotes = []
    if (len(initNotes) != 0):
        size = len(initNotes)
        for notex in initNotes:
            newNotes.append(notex)
        for i in range(length):
            unlabelled = []
            for j in range(i, i + size):
                unlabelled.append(newNotes[j].note)
                unlabelled.append(newNotes[j].velocity)
                unlabelled.append(newNotes[j].time)

            npUnlabelled = np.array(unlabelled).reshape(1, -1)
            if (len(velMdls) > 1):
                velMdl = velMdls[np.random.randint(len(velMdls))]
                notesMdl = notesMdls[np.random.randint(len(notesMdls))]
                timeMdl = timeMdls[np.random.randint(len(timeMdls))]
                n = note(predict(velMdl, npUnlabelled),
                         predict(notesMdl, npUnlabelled),
                         predict(timeMdl, npUnlabelled))
            else:
                n = note(predict(velMdls[0], npUnlabelled),
                         predict(notesMdls[0], npUnlabelled),
                         predict(timeMdls[0], npUnlabelled))
            newNotes.append(n)
    else:
        # TODO: generate a random sequence of initial notes instead of failing.
        print("Error!")

    return newNotes[len(initNotes):]
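# A hedged sketch of the `note` record the generator above assumes. The field
# order matches the constructor calls (velocity, note, time), while the loop
# reads the .note, .velocity and .time attributes.
from collections import namedtuple

note = namedtuple('note', ['velocity', 'note', 'time'])
# seed = [note(64, 60, 0), note(64, 62, 120), note(64, 64, 120)]
# generated = generateNotes(notesMdls, velMdls, timeMdls, 16, seed)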
Example #27
def application(environ, start_response):
    # Expects a URI of the form /path?acidez?ph?alcohol, so splitting on "?"
    # yields the three feature values positionally.
    request = str(environ['REQUEST_URI']).split("?")
    acidez = request[1]
    ph = request[2]
    alcohol = request[3]
    output = c.predict(acidez, ph, alcohol)

    response_headers = [('Content-type', 'text/plain'),
                        ('Content-Length', str(len(output)))]
    status = '200 OK'
    start_response(status, response_headers)

    return [bytes(output, 'utf-8')]
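# A hedged way to serve the WSGI callable above for local testing with the
# standard library. Note that REQUEST_URI is a server-specific key (Apache,
# uWSGI); under wsgiref it has to be reconstructed from PATH_INFO/QUERY_STRING.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server

    def app(environ, start_response):
        # Reconstruct REQUEST_URI, which wsgiref does not provide.
        environ.setdefault('REQUEST_URI',
                           environ['PATH_INFO'] + '?' + environ['QUERY_STRING'])
        return application(environ, start_response)

    with make_server('', 8000, app) as httpd:
        print('Serving on http://localhost:8000 ...')
        httpd.serve_forever()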
def home():
    path = "static/img/types-of-female-models.png"
    if request.method == "POST":
        if request.files:
            image = Image.open(request.files["image"])
            image = classifier.predict(image)
            #path = os.path.join( app.static_folder, "/SYS_TEMP", utils.get_path())
            path = utils.get_path()
            image.save(path)
            #return redirect(request.url)
            return render_template('index.jinja2', path=path)

    return render_template('index.jinja2', path=path)
    def post(self):
        can_img0 = self.get_argument(name='imgBase64')
        can_img1 = can_img0[can_img0.find(',') + 1:]
        # base64.decodestring was removed in Python 3; b64decode plus BytesIO
        # is the portable way to turn the data-URL payload into an image.
        can_img2 = Image.open(BytesIO(base64.b64decode(can_img1)))
        test = multi_to_single_channel(numpy.array(can_img2), 3)
        full_image = Image.fromarray(numpy.array(test).reshape(200, 400),
                                     mode="L").transpose(Image.FLIP_LEFT_RIGHT)
        full_image = full_image.transpose(Image.ROTATE_270)
        vectors = py_dbscan.get_vectors(full_image, 1, 1)
        s = ""
        for vector in vectors:
            s = str(classifier.predict(vector)[0]) + s
        json_obj = {'ret': s}
        self.write(json_obj)
Example #30
def result_upload():
    filename = os.path.join(app.config['UPLOAD_FOLDER'],
                            request.url.split('=')[-1])

    with open(filename) as file_handle:
        data = file_handle.read()

    result = classifier.predict(text_clf, data)
    if result[0]:
        result = 'positive'
    else:
        result = 'negative'
    return render_template('result_upload.html', result=result)
    def on_message(self, message):
        msg = json.loads(message)
        print(msg)
        if msg['type'] == "predict":
            results = classifier.predict(msg['path'])

            # All connected clients receive the result.
            for client in self.connections:
                client.write_message(results)
        else:
            # All connected clients receive the error.
            for client in self.connections:
                client.write_message('Unhandled message')
Example #32
features_train = train.drop(PREDICTION_COLNAME, axis=1)
features_valid = valid.drop(PREDICTION_COLNAME, axis=1)
target_train = train[PREDICTION_COLNAME]
target_valid = valid[PREDICTION_COLNAME]



## SELECT FEATURES
### this is supposed to be step 1 of an sklearn Pipeline, but Pipeline is buggy under Python 2.7
print("selecting features...")
features_train, features_valid = selector.reduce_dimension(features_train, features_valid)


### PREDICT
print "learning parameters and predicting target..."
prediction = classifier.predict(features_train, target_train, features_valid)


## MEASURE OF SUCCESS, PLOT CONTROL
print_score(target_valid, prediction)
#visualize.compare_results(prediction, validation)