Example #1
def classify_snap(img):

    img = cv2.resize(img, dsize=(300, 300), interpolation=cv2.INTER_CUBIC)
    img = img.reshape(1, img.shape[0], img.shape[1], img.shape[2])
    img = img / 255.0
    className = classify.predict(model, img)
    classLabel["text"] = className
Example #2
def check(*args):
    file_path = path.get()
    result = predict('model/classifier.pkl', 'model/features.pkl', file_path)
    if result:
        tkinter.messagebox.showinfo('INFO', message='No threat found')
    else:
        tkinter.messagebox.showwarning('WARNING', message='Malware detected!')
Example #3
def post_file(filename):
    """Upload a file."""

    # headers = request.headers
    # auth = headers.get("X-Api-Key")
    # if auth != 'mskib0102':
    #     return jsonify({"message": "ERROR: Unauthorized"}), 401

    if "/" in filename:
        # Return 400 BAD REQUEST
        abort(400, "no subdirectories directories allowed")

    fullFileName = os.path.join(UPLOAD_DIRECTORY, filename)
    with open(fullFileName, "wb") as fp:
        fp.write(request.data)

    try:
        return "Success", classify.predict(fullFileName)
    except ValueError as e:
        print("Value error \n {0}".format(e))
    except AttributeError as a:
        print("Attribute error \n {0}".format(a))
    except:
        print("Unexpected error:", sys.exc_info()[0])
        abort(500, "Internal Server Error")

    # If classification fails without raising, treat it as a server error
    abort(500, "Internal Server Error")
Example #4
def predict_many(classify_request):
    tweets = classify_request.tweets
    predictions = []
    for tweet in tweets:
        prediction = predict(tweet.message)
        prediction_result = PredictionResult(tweet.id, prediction)
        predictions.append(prediction_result.get_dict())
    return predictions
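
The loop above relies on a `PredictionResult` wrapper with a `get_dict()` method. A small sketch of what such a wrapper might look like; the field names are assumptions, not taken from the original code:

class PredictionResult:
    # Minimal result wrapper assumed by predict_many(); field names are illustrative.
    def __init__(self, tweet_id, prediction):
        self.tweet_id = tweet_id
        self.prediction = prediction

    def get_dict(self):
        return {"id": self.tweet_id, "prediction": self.prediction}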
Example #5
def main():
    audio_filename = input("Please choose an audio file (test/1-10.mp3): ")
    demo_data = feature_extract(audio_filename)
    print('processed data.')
    model_params = {'pca_n': 10, 'knn_k': 5, 'knn_metric': 'minkowski'}
    #  train_and_test(data, [model_params, 'svc'])
    model = classify.load_model(model_params)
    pre = classify.predict(model, demo_data, [model_params, 'svc'])
    result(pre)
Example #6
def main():
    st.set_option('deprecation.showfileUploaderEncoding', False)
    st.title("Birds Around Here")

    uploaded_file = st.file_uploader("Choose an image of a bird...")
    if uploaded_file is not None:
        image = np.asarray(Image.open(uploaded_file))
        st.image(image, caption='Uploaded Image.', use_column_width=True)
        st.write("")
        st.write("Classifying...")
        label, prob = predict(image)
        st.write(f"{label}, prob={prob*100}")
Example #7
def predictor(audio_filename):
    #audio_filename = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("mp3 files","*.mp3"),("all files","*.*")))
    #input("Please choose an audio file(test/1-10.mp3): ")
    print(audio_filename)
    demo_data = feature_extract(audio_filename)
    print('processed data.')
    model_params = {'pca_n': 10, 'knn_k': 5, 'knn_metric': 'minkowski'}
    #  train_and_test(data, [model_params, 'svc'])
    model = classify.load_model(model_params)
    pre = classify.predict(model, demo_data, [model_params, 'svc'])
    result(pre)
    return pre
Example #8
def profileData(clfFile, X, y, threshold=0.5):
    
    y = np.squeeze(y)
    pred = predict(clfFile, X)
    
    positives = pred[y==1]
    negatives = pred[y==0]
    
    true_pos = np.where(positives >= threshold)[0]
    false_neg = np.where(positives < threshold)[0]
    
    true_neg = np.where(negatives < threshold)[0]
    false_pos = np.where(negatives >= threshold)[0]
    
    return true_pos, false_neg, true_neg, false_pos, pred
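
The index arrays returned by profileData make it straightforward to derive summary metrics. A short usage sketch; the classifier file name is a placeholder and X, y are the feature matrix and labels used above:

# Hypothetical usage; 'clf.pkl' and the threshold are placeholders.
true_pos, false_neg, true_neg, false_pos, pred = profileData('clf.pkl', X, y, threshold=0.5)

precision = len(true_pos) / max(len(true_pos) + len(false_pos), 1)
recall = len(true_pos) / max(len(true_pos) + len(false_neg), 1)
print('precision=%.3f recall=%.3f' % (precision, recall))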
Example #9
 def handle_data(self, data_object):
     # parse JSON data
     client_data = json.loads(data_object)
     filenames = ['circle0.svg','circle1.svg','circle2.svg','circle3.svg','morphism0.svg','morphism1.svg','morphism2.svg']
     labels = [1,1,1,1,-1,-1,-1]
     nodeclf = clf.trainSVM(filenames,labels,1,1)
     paths_str = client_data[u'pathdata'].split('\n')[1:]
     pathlist = [celm.path(cp.parseStroke(path_str)) for path_str in paths_str]
     hypotheses = sgm.segmentPath(pathlist,train=False)
     [corr_pathlist,intersect] = sgm.correctPathlist(pathlist)        
     normal = clf.predict(hypotheses,nodeclf)
     if corr_pathlist is not None:
         correction = clf.predict(corr_pathlist,nodeclf)
     else:
         correction = None           
     [wirelist,dotlist,morphismlist] = cn.findWinner(normal,correction,intersect)
     tree = sp.loadFile('blank.svg')
     cn.drawOutput(tree,wirelist,dotlist,morphismlist)
     latex_command = None
     try:
         latex_command = td.create_diagram(dotlist,morphismlist,wirelist)
     except:
         pass
     response_data = json.dumps(latex_command)
     return response_data
Example #10
def profileData(clfFile, X, y, threshold=0.5):

    y = np.squeeze(y)
    pred = predict(clfFile, X)

    positives = pred[y == 1]
    negatives = pred[y == 0]

    true_pos = np.where(positives >= threshold)[0]
    false_neg = np.where(positives < threshold)[0]

    true_neg = np.where(negatives < threshold)[0]
    false_pos = np.where(negatives >= threshold)[0]

    return true_pos, false_neg, true_neg, false_pos, pred
Example #11
def main():
  st.title("Luekemia Cell Detector")
  folder_path = '.\\New folder'
  filename = file_selector(folder_path=folder_path)
  st.write('You selected `%s`' % filename[13:])
  img_u = cv2.imread(filename)
  img = cv2.resize(img_u,(400,400))
  st.image(img, caption='Uploaded Image', use_column_width=True)
  if st.button('predict'):

    st.write("**Processing.....**")
    label = classify.predict(img)
    label = label.item()
    res = abbreviation_dict.get(label)
    st.markdown(res)
Example #12
def apply_ML(data, sources):

    decision_boundary = 0.159

    # prepare data for machine learning
    print("[*] Preparing data for machine learning.")
    X = prepare_data_for_ML(data, sources)

    pooledFeatures = convolve_and_pool(X)

    print "[*] Applying feature scaling."
    tmp = sio.loadmat("/Users/dew/development/PS1-Real-Bogus/ufldl/sparsefiltering/features/"+\
                      "SF_maxiter100_L1_3pi_20x20_skew2_signPreserveNorm_6x6_k400_patches_"+\
                      "stl-10_unlabeled_meansub_20150409_psdb_6x6_pooled5.mat")["pooledFeaturesTrain"]
    tmp = np.transpose(tmp, (0, 2, 3, 1))
    numTrainImages = np.shape(tmp)[3]
    tmp = np.reshape(tmp, (int((tmp.size)/float(numTrainImages)), \
                           numTrainImages), order="F")
    scaler = preprocessing.MinMaxScaler()
    scaler.fit(tmp.T)  # Don't cheat - fit only on training data
    tmp = None

    X = np.transpose(pooledFeatures, (0, 2, 3, 1))
    numImages = np.shape(X)[3]
    X = np.reshape(X, (int((pooledFeatures.size)/float(numImages)), \
                       numImages), order="F")
    X = scaler.transform(X.T)
    clfFile = "/Users/dew/development/PS1-Real-Bogus/ufldl/sparsefiltering/classifiers/"+\
              "SVM_linear_C1.000000e+00_SF_maxiter100_L1_3pi_20x20_skew2_signPreserveNorm"+\
              "_6x6_k400_patches_stl-10_unlabeled_meansub_20150409_psdb_6x6_pooled5.pkl"
    print "[*] Making predictions."
    pred = predict(clfFile, X)
    m = len(np.where(pred > decsion_boundary)[0])
    print "[*] %d quality detections passing machine learning threshold (5%% FoM)." % (
        m)
    """
    print "[*] Plotting quality detections."
    dim = np.ceil(np.sqrt(m))
    fig = plt.figure()
    for i, index in  enumerate(np.where(pred > decsion_boundary)[0]):
        ax = fig.add_subplot(dim,dim,i+1)
        position = (sources['xcentroid'][index],sources['ycentroid'][index])
        img = data[position[1]-10:position[1]+10,position[0]-10:position[0]+10]
        ax.imshow(img, cmap="gray_r", interpolation="nearest", origin="lower")
        plt.axis("off")
    plt.show()
    """
    return np.where(pred > decision_boundary)[0]
Example #13
def classify():
    data = json.loads(request.get_data())
    #print(data)
    if data is None:
        return 'Got None'
    else:
        image = eval(data["image"])
        #print(image)
        prediction = predict(image)
        result = prediction.split()

    return jsonify({
        'Assembly': result[0],
        'Orientation': result[1],
        'Face': result[2]
    })
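
A hypothetical client call for the Flask handler above; the host, port, route and payload shape are assumptions based on how the handler reads its request body:

import json
import requests

# The handler eval()s the "image" field, so the client sends it as a string.
payload = {"image": "[[0, 0, 0], [255, 255, 255]]"}
response = requests.post("http://localhost:5000/classify", data=json.dumps(payload))
print(response.json())  # e.g. {"Assembly": ..., "Orientation": ..., "Face": ...}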
Example #14
def apply_ML(data, sources):

    decision_boundary = 0.159

    # prepare data for machine learning
    print("[*] Preparing data for machine learning.")
    X = prepare_data_for_ML(data, sources)

    pooledFeatures = convolve_and_pool(X)

    print "[*] Applying feature scaling."
    tmp = sio.loadmat("/Users/dew/development/PS1-Real-Bogus/ufldl/sparsefiltering/features/"+\
                      "SF_maxiter100_L1_3pi_20x20_skew2_signPreserveNorm_6x6_k400_patches_"+\
                      "stl-10_unlabeled_meansub_20150409_psdb_6x6_pooled5.mat")["pooledFeaturesTrain"]
    tmp = np.transpose(tmp, (0,2,3,1))
    numTrainImages = np.shape(tmp)[3]
    tmp = np.reshape(tmp, (int((tmp.size)/float(numTrainImages)), \
                           numTrainImages), order="F")
    scaler = preprocessing.MinMaxScaler()
    scaler.fit(tmp.T)  # Don't cheat - fit only on training data
    tmp = None

    X = np.transpose(pooledFeatures, (0,2,3,1))
    numImages = np.shape(X)[3]
    X = np.reshape(X, (int((pooledFeatures.size)/float(numImages)), \
                       numImages), order="F")
    X = scaler.transform(X.T)
    clfFile = "/Users/dew/development/PS1-Real-Bogus/ufldl/sparsefiltering/classifiers/"+\
              "SVM_linear_C1.000000e+00_SF_maxiter100_L1_3pi_20x20_skew2_signPreserveNorm"+\
              "_6x6_k400_patches_stl-10_unlabeled_meansub_20150409_psdb_6x6_pooled5.pkl"
    print "[*] Making predictions."
    pred = predict(clfFile, X)
    m = len(np.where(pred > decsion_boundary)[0])
    print "[*] %d quality detections passing machine learning threshold (5%% FoM)." % (m)
    """
    print "[*] Plotting quality detections."
    dim = np.ceil(np.sqrt(m))
    fig = plt.figure()
    for i, index in  enumerate(np.where(pred > decsion_boundary)[0]):
        ax = fig.add_subplot(dim,dim,i+1)
        position = (sources['xcentroid'][index],sources['ycentroid'][index])
        img = data[position[1]-10:position[1]+10,position[0]-10:position[0]+10]
        ax.imshow(img, cmap="gray_r", interpolation="nearest", origin="lower")
        plt.axis("off")
    plt.show()
    """
    return np.where(pred > decision_boundary)[0]
Example #15
def analyze_user(twitter_handler, reddit_handler):
    def _remove_time(s):
        return " ".join(s.split("|")[:-1])

    sentences = []

    if twitter_handler is not None:
        try:
            sentences = sentences + twitterapi.get_user_tweets(twitter_handler)
        except:
            print("USER DNE")
    if reddit_handler is not None:
        try:
            sentences = sentences + reddit.get_user_activity(reddit_handler)
        except:
            print("USER DNE")

    y, merged_y = predict(sentences)

    return merged_y[0][1]
Example #16
def query_image(path):
    # TODO: query image
    path = os.path.join(app.config['UPLOAD_FOLDER'], path)
    if os.path.isfile(path):
        # TODO: query
        # key = "865c2ba"  # only for demonstration
        key = predict(path)[0]
        if key.startswith("w_"):
            key = key[2:]
        if key in label_dict:
            files = label_dict[key]
            urls = ["train/" + f for f in files]
            random.shuffle(urls)
            return jsonify(urls)
        else:
            return "no matched images"
        # TODO: delete, for frontend test
        # print(path)
        # urls = [path,path,path,path]
        # return jsonify(urls)
    else:
        return "file invalid"
Example #17
def main():
    st.title("Dogs/Cats Classification")
    html_temp = """
    <div style="background-color:tomato;padding:15px;"
    <h1> With Convoultion Neural Networks </h1>
    </div>
    """
    st.markdown(html_temp, unsafe_allow_html=True)

    uploaded_file = st.file_uploader("Upload an image...", type=["jpg", "png"])

    if uploaded_file is not None:
        file = uploaded_file.read()
        file_bytes = np.asarray(bytearray(file), dtype=np.uint8)
        img_cv2 = cv2.imdecode(file_bytes, 1)
        img = Image.open(uploaded_file)
        st.image(img, caption='Uploaded Image.', use_column_width=True)
        pred = predict(img_cv2)
        if pred == 1:
            st.success("Bow Bow, it's a DOG")
        else:
            st.success("Meow, it's a CAT")
        st.balloons()
Example #18
st.title(" ")

st.subheader(
    'Upload an image of a Daisy, Dandelion, Rose, Sunflower or Tulip')

st.title(" ")
st.title(" ")

uploaded_file = st.file_uploader(".")
if uploaded_file is not None:

    image = Image.open(uploaded_file)

    lol = load_img(uploaded_file, target_size=(224, 224))
    lol = img_to_array(lol)

    lol = lol / 255.0
    lol = np.array([lol])

    st.image(image, use_column_width=True)

    st.write("")

    if st.button('predict'):

        final_prediction = classify.predict(lol)

        st.success(
            f'The flower in the picture is most likely {final_prediction}')
Example #19
st.write("""
# Simple VGG16 Model Classifier

""")

import streamlit as st
import os

def file_selector(folder_path='./images'):
    filenames = os.listdir(folder_path)
    selected_filename = st.selectbox('Select your Image', filenames)
    return os.path.join(folder_path, selected_filename)

filename = file_selector()
st.write('You selected `%s`' % filename)

# upload_img = st.file_uploader(label='Upload your Image', type=['png', 'jpg'])



if filename:
	img = Image.open(filename)
	st.image(img, caption="Your Image", use_column_width=True)
	st.write("Classifying...")
	label = predict(filename)
	st.write('The image is %s with %.2f%% probability' % (label[1], label[2]*100))


# if upload_img is not None:
#     file_bytes = np.asarray(bytearray(upload_img.read()), dtype=np.uint8)
#     opencv_image = cv2.imdecode(file_bytes, 1)
st.write("")

Example #20

classes = {
        0: 'Normal Chest Xray',
        1: 'Pneumonia Detected'
      }

st.title("Pneumonia Detection - Machine Learning")
st.write("")
st.write("")
st.subheader("By Jeevan Thukrul")
uploaded_file = st.file_uploader("Choose an image...", type=["jpg","png","jpeg"])
if uploaded_file is not None:

    image = Image.open(uploaded_file)
    st.image(image, caption='Uploaded Image', use_column_width=True)

    if st.button('predict'):
        st.write("Result...")
        label = classify.predict(uploaded_file)
        label = label.item()
        res = classes.get(label)
        st.success(res)

        img1 = "Output/cnn_acc.png"
        img2 = "Output/cnn_loss.png"
        st.image(img1)
        st.image(img2)
Example #21
import cv2
import sys
import os
import glob
import classify as alexnet

pathname = sys.argv[1]

top_dirs = glob.glob(pathname + "*/")
for top_dir in top_dirs:
    imgs = glob.glob(top_dir + "*.jpg")
    dirs = glob.glob(top_dir + "*/")
    f1 = open(top_dir + 'classification.txt', 'w')
    
    #classify each image
    for im_file in imgs:
        alexnet.predict(im_file, f1, "jpg")
    f1.close()
        
    #run through each dir
    for a_dir in dirs:
        distorted_imgs = glob.glob(a_dir + "*.png")
        f2 = open(a_dir + 'classification.txt', 'w')
        
        #classify each image
        for dist_file in distorted_imgs:
            alexnet.predict(dist_file, f2, "png")
        f2.close()
Example #22
import streamlit as st
from PIL import Image
from classify import predict

st.title("Hotdog Classifier")

file = st.file_uploader("Upload an image...", type=["png", "jpg"])
if file is not None:
    image = Image.open(file)
    st.image(image, caption="Uploaded image", use_column_width=True)
    st.write("")
    st.write("Evaluating...")
    label = predict(file)
    if label[1] == "hotdog":
        st.write("✅ Hotdog")
    else:
        st.write("❌ Not hotdog")
Example #23
def converter(value):
    if value == 0:
        return "infected"
    else:
        return "uninfected"

st.title("Upload + MALARIA IMAGE")
st.text("NAME : OMONIYI TEMIDAYO ANDREW")
st.text("TEAM : TEAM WAKANDA")
st.text("EMAIL: [email protected], [email protected]")
st.text("NO:    +2348166220117")
html_temp="""
<div style="background-color:gray;padding:15px;">
<h2>RGB IMAGE OF BLOOD SAMPLE</h2>
</div>
"""
st.markdown(html_temp,unsafe_allow_html=True)

uploaded_file = st.file_uploader("Choose an image...", type=("jpeg","jpg","png"))
if uploaded_file is not None:
    image = Image.open(uploaded_file)
    st.image(image, caption='Uploaded Image.', use_column_width=True)
    st.write("")
    st.write("Classifying...")
    # map the numeric prediction to a label using converter()
    prediction = predict(uploaded_file)
    st.write('I THINK:', converter(prediction))




Example #24
def predict_demo(bucket_name, slice_id):

    load_data_beg_time = time.time()
    opt = parse_opts()
    opt.reload_path = os.path.join(opt.root_path, opt.reload_path)
    opt.save_path = os.path.join(opt.root_path, opt.data_path)

    # download dcm files from S3
    download_dcm(bucket_name, slice_id, opt.save_path)

    # TODO: extracting the npy, mask and info here could be made multithreaded, since data extraction is very time-consuming
    # preprocess dcm files
    dcm_file = os.path.join(opt.save_path, bucket_name)
    image, data, mask, info = get_patient_ct_data_mask_info_mhd(dcm_file, opt)
    '''#########################################################################'''
    # reload trained deeplab model used to make segment results#
    '''#########################################################################'''
    seg_beg_time = time.time()
    seg_reload_file = os.path.join(opt.reload_path, opt.seg_reload_model)
    seg_model, _ = model.generate_model(opt, phase='segment')
    checkpoint = torch.load(seg_reload_file)
    state_dict = checkpoint['state_dict']
    if not opt.no_cuda:  # use GPU
        seg_model.load_state_dict(state_dict)
    else:  # use CPU
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            name = k[7:]  # remove `module.`
            new_state_dict[name] = v
        seg_model.load_state_dict(new_state_dict)
    print('reload segment model cost time:', time.time() - seg_beg_time)

    seg_loader = get_seg_inputs(data, opt)
    print('get segment data cost time:', time.time() - seg_beg_time)

    seg_preds = segment.predict(seg_loader, seg_model, opt)
    seg_results = get_seg_outputs(seg_preds, mask, opt.seg_pred_thresh)
    print('segment cost time:', time.time() - seg_beg_time)
    '''#########################################################################'''
    # reload trained resnet model used to make classify results#
    '''#########################################################################'''

    cla_beg_time = time.time()
    cla_reload_file = os.path.join(opt.reload_path, opt.cla_reload_model)
    cla_model, _ = model.generate_model(opt, phase='classify')

    # loading checkpoint
    checkpoint = torch.load(cla_reload_file)
    state_dict = checkpoint['state_dict']
    if not opt.no_cuda:  # use GPU
        cla_model.load_state_dict(state_dict)
    else:  # use CPU
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            name = k[7:]  # remove `module.`
            new_state_dict[name] = v
        cla_model.load_state_dict(new_state_dict)
    print('reload classify model cost time:', time.time() - cla_beg_time)

    cla_loader = sigthread_get_cla_inputs(data, seg_results, opt)
    print('get classify data cost time:', time.time() - cla_beg_time)

    cla_prob, cla_pred = classify.predict(cla_loader, cla_model, opt)
    print('classify cost time:', time.time() - cla_beg_time)

    ID = dcm_file.split('/')[-1]
    results = get_cla_outpus(cla_prob, cla_pred, seg_results, info, ID)
    results.to_csv('results.csv', index=False)
    print('predict cost all time:', time.time() - load_data_beg_time)
    show_predict_results(image, results, opt.save_path)
Example #25
st.write(
    "This is a simple image classification web app to predict whether a person is suffering from COVID-19 or not"
)

file = st.file_uploader("Please upload an image file",
                        type=["jpg", "png", "jpeg"])
buffer = file
temp_file = NamedTemporaryFile(delete=False)
if buffer:
    temp_file.write(buffer.getvalue())
    st.write(image.load_img(temp_file.name))

if file is None:
    st.text("You haven't uploaded an image file")
else:
    image = Image.open(temp_file)
    st.image(image, caption='Uploaded Image', use_column_width=True)

    st.write("")

    if st.button('predict'):
        st.write("Result...")
        label = classify.predict(temp_file)
        if label >= 0.5:
            st.write(
                "Congrats!!! The scan shows it's COVID-19 negative... \n Stay safe"
            )
        else:
            st.write("Sorry, the scan shows it's COVID-19 positive")
Example #26
import streamlit as st
from PIL import Image
from classify import predict

st.title("Upload + Classification Example")

uploaded_file = st.file_uploader("Choose an image...", type="jpg")
if uploaded_file is not None:
    image = Image.open(uploaded_file)
    st.image(image, caption='Uploaded Image.', use_column_width=True)
    st.write("")
    st.write("Classifying...")
    label = predict(uploaded_file)
    st.write(label)
    st.write(label[:, 0])
    #st.write('%s (%.2f%%)' % (label[0], label[0]*100))
Example #27
import cv2
import sys
import os
import glob
import classify as alexnet

pathname = sys.argv[1]

top_dirs = glob.glob(pathname + "*/")
for top_dir in top_dirs:
    imgs = glob.glob(top_dir + "*.jpg")
    dirs = glob.glob(top_dir + "*/")
    f1 = open(top_dir + 'classification.txt', 'w')

    #classify each image
    for im_file in imgs:
        alexnet.predict(im_file, f1, "jpg")
    f1.close()

    #run through each dir
    for a_dir in dirs:
        distorted_imgs = glob.glob(a_dir + "*.png")
        f2 = open(a_dir + 'classification.txt', 'w')

        #classify each image
        for dist_file in distorted_imgs:
            alexnet.predict(dist_file, f2, "png")
        f2.close()
Example #28
import itertools

import pandas

import classify
import cluster
import features

INPUT_FILE = "input.log"
FEATURES_FILE = "features.csv"
RESULT_FILE = "clusterResult.csv"
EXTRACTED_SECTIONS_FILE = "detectedSections.log"
LABEL_COLUMN_NAME = "label"
LINE_NUMBER_COLUMN_NAME = "line_number"

features.extractFeatures(inputFile=INPUT_FILE, outputFile=FEATURES_FILE)

unlabelledSet = pandas.read_csv(FEATURES_FILE, skipinitialspace=True, header=0)

unlabelledSet[LABEL_COLUMN_NAME] = pandas.DataFrame(
    classify.predict(unlabelledSet), columns=[LABEL_COLUMN_NAME])

#unlabelledSet[LABEL_COLUMN_NAME] = pandas.read_csv("labels.csv", skipinitialspace=True, header=0)

labelledSet = unlabelledSet

#classify.train(labelledSet, 1000)

#classify.evaluate(labelledSet, 1)

type10 = cluster.kMeans(10, labelledSet, columnPrefix="type")
type100 = cluster.kMeans(100, labelledSet, columnPrefix="type")
type1000 = cluster.kMeans(1000, labelledSet, columnPrefix="type")

offsetValue = 10
Example #29
import os
import sys
import cv2
import imutils
import numpy as np
import matplotlib.pyplot as plt
import pickle
from classify import predict
folder = sys.argv[1]
####################################################
pieces = []  # from H8 = pieces[0] to A1 = pieces[63]
path = f'C:\\Users\\Jerry\\sys\\{folder}'
listing = os.listdir(path)
for file in listing:
    filepath = f'C:\\Users\\Jerry\\sys\\{folder}\\{file}'
    pieces.append(predict(filepath))
squares = [
    'A1', 'B1', 'C1', 'D1', 'E1', 'F1', 'G1', 'H1', 'A2', 'B2', 'C2', 'D2',
    'E2', 'F2', 'G2', 'H2', 'A3', 'B3', 'C3', 'D3', 'E3', 'F3', 'G3', 'H3',
    'A4', 'B4', 'C4', 'D4', 'E4', 'F4', 'G4', 'H4', 'A5', 'B5', 'C5', 'D5',
    'E5', 'F5', 'G5', 'H5', 'A6', 'B6', 'C6', 'D6', 'E6', 'F6', 'G6', 'H6',
    'A7', 'B7', 'C7', 'D7', 'E7', 'F7', 'G7', 'H7', 'A8', 'B8', 'C8', 'D8',
    'E8', 'F8', 'G8', 'H8'
]
i = 0
final_out = []
for piece in reversed(pieces):  #A1 to H8
    final_out.append((squares[i], piece))
    i += 1
for i in range(len(final_out)):
    print(final_out[i], " ")
Example #30
def main(svg_path, model, mapping):
    #parser = argparse.ArgumentParser()
    #args = parser.parse_args()

    doc = minidom.parse(svg_path + os.sep +
                        'output.svg')  # parseString also exists
    path_strings = [
        path.getAttribute('d') for path in doc.getElementsByTagName('path')
    ]
    height = float([
        path.getAttribute('height') for path in doc.getElementsByTagName('svg')
    ][0])
    width = float([
        path.getAttribute('width') for path in doc.getElementsByTagName('svg')
    ][0])
    scale_raw = [
        path.getAttribute('transform')
        for path in doc.getElementsByTagName('g')
    ]
    scale = 1
    try:
        for maybe in scale_raw:
            if 'scale' in maybe:
                scale = float(re.sub(r's.*,', '', maybe).replace(')', ''))
                height /= scale
                break
    except:
        print("No scale present")

    def dist_f(x, y):
        return np.sqrt(np.sum((x - y)**2))

    points = None
    ends = []
    groups = None
    for path in path_strings:
        path_parse = parse_path(path)
        length = path_parse.length(error=1e-5)
        if length < (max(width / 500, 20)):
            continue
        num_points = 200
        point_arr = np.asarray([[
            int(round(path_parse.point(x).real)),
            int(round(path_parse.point(x).imag))
        ] for x in np.linspace(0, 1, num_points)],
                               dtype=np.float32)
        print(point_arr.min(axis=0), point_arr.max(axis=0))
        if points is None:
            points = [{'points': point_arr, 'path': [path]}]
        # combining in loop
        grouped = False
        # for n, p in enumerate(points):
        #     pts = p['points']
        #     dist_arr = distance.cdist(pts, point_arr, 'euclidean')
        #     dist = dist_arr.min()
        #     if dist < width / 15:
        #         print("Combined")
        #         if groups is None:
        #             groups = [{'points': point_arr, 'paths': [path]}]
        #         else:
        #             for g in groups:
        #                 if p['path'] in g['paths']:
        #                     grouped = True
        #                     arr_1 = g['points']
        #                     g['points'] = np.concatenate((g['points'], point_arr))
        #                     g['paths'].append(path)
        #         break
        if not grouped:
            if groups is None:
                groups = [{'points': point_arr, 'paths': [path]}]
            groups.append({'points': point_arr, 'paths': [path]})
        points.append({'points': point_arr, 'path': path})
    print("Combined groups ", len(groups))
    gl = 0
    for i in groups:
        gl += len(i['paths'])
    print(gl, len(path_strings))
    groups = combineGroups(groups, width, 200)
    words = combineWords(groups, width, 15)
    fixWordOrder(words)
    fixLetters(words)
    boxes = []
    image = cv2.imread(svg_path + os.sep + "extract.jpg")
    height = int(height * scale * 0.8)
    width = int(width * 0.8)
    print(len(groups))
    # formBoundaries(groups, image, scale, width, height)
    # formBoundaries(words, image, scale, width, height)
    gl = 0
    for i in words:
        gl += len(i['letters'])
    print(gl)
    print(len(words))
    image2 = copy.deepcopy(image)
    word_letters = []
    for ind, w in enumerate(words):
        tmp_folder = svg_path + "word-" + str(ind) + os.sep
        word_letters.append({'path': tmp_folder, 'len': len(w['letters'])})
        if not os.path.exists(tmp_folder):
            os.makedirs(tmp_folder)
        formBoundaries(w['letters'], image, scale, width, height, tmp_folder,
                       image2)
    cv2.imwrite(svg_path + "line_detect.jpg", image2)
    print(word_letters)
    words = []
    for ww in word_letters:
        word = ''
        for l in range(ww['len']):
            char_path = ww['path'] + str(l) + ".jpg"
            prediction = classify.predict(char_path, model, mapping)
            word += prediction
        words.append(word)
    f = open(svg_path + 'prediction.txt', 'w')
    sentence = (" ").join(words)
    f.write(sentence)
    f.close()
    return sentence
Example #31
def predict_demo():

    beg_time = time.time()
    opt = parse_opts()
    opt.reload_path = os.path.join(opt.root_path, opt.reload_path)
    #opt.image_path = os.path.join(opt.root_path,opt.image_path)
    #opt.mask_path = os.path.join(opt.root_path,opt.mask_path)

    opt.image_path = os.path.join(opt.root_path,
                                  '../../datas/test_data/' + opt.image_path)
    opt.mask_path = os.path.join(opt.root_path,
                                 '../../datas/test_data/' + opt.mask_path)
    '''#########################################################################'''
    # ----------reload pretrained models-------------#
    '''#########################################################################'''

    beg_time = time.time()
    # segment checkpoint
    seg_reload_file = os.path.join(opt.reload_path, opt.seg_reload_model)
    seg_model, _ = model.generate_model(opt, phase='segment')
    seg_checkpoint = torch.load(seg_reload_file)
    seg_state_dict = seg_checkpoint['state_dict']
    if not opt.no_cuda:  # use GPU
        seg_model.load_state_dict(seg_state_dict)
    else:  # use CPU
        new_seg_state_dict = OrderedDict()
        for k, v in seg_state_dict.items():
            name = k[7:]  # remove `module.`
            new_seg_state_dict[name] = v
        seg_model.load_state_dict(new_seg_state_dict)

    # loading classify checkpoint
    cla_reload_file = os.path.join(opt.reload_path, opt.cla_reload_model)
    cla_model, _ = model.generate_model(opt, phase='classify')

    cla_checkpoint = torch.load(cla_reload_file)
    cla_state_dict = cla_checkpoint['state_dict']
    if not opt.no_cuda:  # use GPU
        cla_model.load_state_dict(cla_state_dict)
    else:  # use CPU
        new_cla_state_dict = OrderedDict()
        for k, v in cla_state_dict.items():
            name = k[7:]  # remove `module.`
            new_cla_state_dict[name] = v
        cla_model.load_state_dict(new_cla_state_dict)
    print('load model cost time:', time.time() - beg_time)

    # loop over the evaluation data
    results = pd.DataFrame()
    names = os.listdir(opt.image_path)
    for i, name in enumerate(names):
        predict_beg_time = time.time()
        # load data
        image_path = os.path.join(opt.image_path, name)
        mask_path = os.path.join(opt.mask_path, name)
        state = get_patient_image_mask(image_path, mask_path,
                                       opt.seg_sample_size)
        if state is None:
            print('file has an unexpected shape')
            continue
        else:
            image, mask = state

        # seg
        seg_loader = get_seg_inputs(image, opt)
        seg_pred = segment.predict(seg_loader, seg_model, opt)
        seg_result, _ = get_seg_outputs(seg_pred, mask, opt.seg_pred_thresh)

        # cla
        cla_loader = get_cla_inputs(image, seg_result, opt)
        cla_prob, cla_pred = classify.predict(cla_loader, cla_model, opt)
        cla_result = get_cla_outpus(cla_prob, cla_pred, seg_result)

        # combine results
        ID = name.split('.')[0]
        cla_result['ID'] = ID
        results = results.append(cla_result)
        print('loop %d: predict %s cost time:' % (i, name),
              time.time() - predict_beg_time)
    targets = pd.read_csv(os.path.join(
        opt.root_path, '../../datas/origin_datas/csv/annotations_detail.csv'),
                          dtype={'ID': str})
    results.to_csv('outputs.csv', index=False)

    statistics(results, targets)
    print('predict all file cost time:', time.time() - beg_time)
Example #32
import streamlit as st
from PIL import Image
from classify import predict

st.title("Image Classifier ML Web")
selected_model = st.sidebar.selectbox('모델 선택', ('xception', 'vgg16'))

uploaded_file = st.file_uploader('이미지 업로드', type=("png", "jpg"))

if uploaded_file is not None:
    image = Image.open(uploaded_file)
    st.image(image, caption='업로드된 이미지', use_column_width=True)
    st.write("")
    st.write("분류중...")
    label = predict(uploaded_file, selected_model)
    st.success('%s (%.2f%%)' % (label[1], label[2] * 100))
Example #33
def plot_MDR_vs_mag(clfFile, X, fileList, infoFile, threshold=0.5, color="#FF0066"):

    pred = predict(clfFile, X)

    mags = []
    for line in open(infoFile,"r").readlines():
        #print line.rstrip().split(",")[-1].split("/")[-1] in set(fileList)
        if line.rstrip().split(",")[-1].split("/")[-1] in set(fileList):
            #print float(line.rstrip().split(",")[4])
            mags.append(float(line.rstrip().split(",")[4]))
 
    font = {"size"   : 20}
    plt.rc("font", **font)
    plt.rc("legend", fontsize=18)
 
    bins = np.arange(14,23,1)
    n, bins, patches = plt.hist(mags, bins=bins)
    #print bins
    bin_allocations = np.digitize(mags, bins)
    #print bin_allocations
    MDRs = []
    for i in range(1,len(bins)):
        if n[i-1] == 0:
            MDRs.append(0)
            continue
        preds_for_bin = pred[np.squeeze(np.where(bin_allocations == i))]

        MDRs.append(np.shape(np.where(preds_for_bin >= threshold))[1] / float(n[i-1]))
    print(MDRs)
    mid_points = []
    for i in range(len(bins)-1):
        mid_points.append(np.mean([bins[i], bins[i+1]]))

    fig = plt.figure()
    ax2 = fig.add_subplot(111)
    bins = np.arange(14,23,1)
    ax1 = ax2.twinx()
    ax2.set_xlabel("Magnitude")
    ax1.set_ylabel("Frequency")
    ax1.set_ylim(ymin=0-0.01*np.max(n), ymax=np.max(n)+0.01*np.max(n))
    n, bins, patches = ax1.hist(mags, bins=bins, color=color, \
                                alpha=0.25, edgecolor="none")#FF0066
    ax2.set_zorder(ax1.get_zorder()+1)
    ax2.patch.set_visible(False)
    ax2.set_ylim(ymin=-0.01, ymax=1.01)

    #print np.shape(MDRs)
    #print np.shape(mid_points)
    #oldMDRs = [1.0, 0.5555555555555556, 0.6666666666666666, 1.0, 0.6666666666666666, \
    #        0.6097560975609756, 0.2923076923076923, 0.06217616580310881, 0.05673758865248227, \
    #        0.06451612903225806, 0.1794871794871795]

    #ax2.plot(mid_points, oldMDRs, "-",label="old", color = "#66FF33", lw=2, alpha=0.5)
    #ax2.plot(mid_points, oldMDRs, ".", color = "#66FF33", ms=10, markeredgecolor='none', alpha=0.5)

    ax2.plot(mid_points, MDRs, "-",color = "k", lw=3)
    ax2.plot(mid_points, MDRs, "-",label="new", color = color, lw=2)
    ax2.plot(mid_points, MDRs, "o", color = color, ms=5)#3366FF
    ax2.plot(mid_points+[12,18,25], 0.211*np.ones(np.shape(mid_points+[12, 18, 25])), "--", color="k", lw=2)

    ax2.set_ylabel("Missed Detection Rate")
    ax2.set_xlim(xmin=13.9, xmax=22.1)
    ax2.grid()
    ax2.text(14.1,0.215,"0.211", size=18, color="k")


    #ax2.legend()
    plt.show()