Example #1
def learning():
    # add to learning dataset
    files = []
    for i in [1, 3, 5]:
        for j in range(1, 8):
            filename = r'.\image\bdd\pose' + str(j) + '_' + str(i) + '.png'
            files.append([filename, j])
    data = []
    for file in files:
        print(file[0])
        nbClass = file[1]
        img = lireFichier(file[0])
        graph = skeleton(img)
        firstFilter(graph)
        secondFilter(graph)
        thirdFilter(graph)
        deleteCircle(graph)
        data_head, data_lefthand, data_righthand, data_leftleg, data_rightleg = calculateDataset(
            graph)
        percentage_length_left = data_lefthand[3] / data_lefthand[2]
        percentage_angle_left = data_lefthand[4] / 90
        percentage_length_right = data_righthand[3] / data_righthand[2]
        percentage_angle_right = data_righthand[4] / 90
        data.append([
            percentage_length_left, percentage_angle_left,
            percentage_length_right, percentage_angle_right, nbClass
        ])
        # data.append([data_head, data_lefthand, data_righthand, data_leftleg, data_rightleg])
        # drawGraph(graph)
    return data
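A minimal usage sketch for the rows returned by learning(): the four-features-plus-class row layout is taken from the function above, while scikit-learn and the KNeighborsClassifier choice are illustrative assumptions, not the original author's pipeline.

# Hypothetical usage (assumes scikit-learn); splits the 5-element rows into X and y.
import numpy as np
from sklearn.neighbors import KNeighborsClassifier

rows = np.array(learning())
X, y = rows[:, :4], rows[:, 4].astype(int)  # four ratio features, then the class id
clf = KNeighborsClassifier(n_neighbors=3).fit(X, y)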
Example #2
def main():
    # Load data

    import os
    import cv2
    import glob
    img_dir = "train_custom/"  # Enter Directory of all images
    data_path = os.path.join(img_dir, '*g')
    files = glob.glob(data_path)
    data = []
    for f1 in files:
        img = rgb2gray(cv2.imread(f1))
        data.append(img)

    # Preprocessing
    print("Start to data preprocessing...")
    data = [preprocessing(d) for d in data]

    # Create Hopfield Network Model
    model = network.HopfieldNetwork()
    model.train_weights(data)

    # Generate testset
    img_dir = "test_custom/"  # Enter Directory of all images
    data_path = os.path.join(img_dir, '*g')
    files = glob.glob(data_path)
    test = []
    # Since the filenames are the same, glob returns them in the same order
    for f1 in files:
        img = rgb2gray(cv2.imread(f1))
        test.append(img)
    test = [preprocessing(d) for d in test]

    predicted = model.predict(test, threshold=0, asyn=False)
    print("Show prediction results...")
    plot(data, test, predicted)
Example #3
def get_train_data(sample_path, vector_stub):

    os.chdir(sample_path)

    data = []
    label = []

    target_set = set(
        [elem for elem in os.listdir(sample_path) if os.path.isdir(elem)])
    print(target_set)
    class_num = len(target_set)
    class_dict = dict(zip(target_set, range(class_num)))

    for root, dirs, files in os.walk(sample_path, topdown=False):
        word_data = [
            get_icf_feature(cv2.imread(os.path.join(root, name)), vector_stub)
            for name in files if os.path.splitext(name)[-1] in EXT_DICT
        ]
        for elem in word_data:
            label.append(class_dict[root.split(os.sep)[-1]])
            data.append(elem)

    data = np.array(data)
    label = np.array(label)
    return data, label, class_dict
Example #4
def load_parallel(files, id_label, path, test_model):
    process_list = list()
    X = []
    Y = []
    files = cv.n_list(files, n_cores)

    for id_core in range(n_cores):
        p = multiprocessing.Process(target=load_X_compress_parallel,
                                    args=(files[id_core], id_label, path,
                                          test_model, id_core))
        p.start()
        process_list.append(p)

    # Join all the worker processes
    for pp in process_list:
        pp.join()

    for i in range(n_cores):
        X_core, Y_core = return_process_dict[i]
        X.append(X_core)
        if test_model:
            Y.append(Y_core)

    # results from the cores, in the same order as the input ids
    data = []
    labels = []
    for core in range(n_cores):
        for index in range(len(X[core])):
            data.append(X[core][index])
            if test_model:
                labels.append(Y[core][index])

    return data, labels
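return_process_dict is not defined in this snippet, and with separate processes a plain module-level dict would not be visible across workers. A minimal sketch of one plausible setup using multiprocessing.Manager; the worker name and signature are copied from the call above, but its body is an assumption.

# Hypothetical shared dict the workers write into, keyed by core id.
import multiprocessing

manager = multiprocessing.Manager()
return_process_dict = manager.dict()

def load_X_compress_parallel(files, id_label, path, test_model, id_core):
    X_core, Y_core = [], []
    # ... load and preprocess every file in `files` here ...
    return_process_dict[id_core] = (X_core, Y_core)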
Example #5
def main():
    #This code sets up the parser for command line arguments specifying parameters for creating the h5py file.
    parser = argparse.ArgumentParser()
    parser.add_argument("data_f")
    parser.add_argument("label_f")
    parser.add_argument("-o",
                        "--output",
                        action='store',
                        default="data.h5",
                        help='output data dir and filename')
    parser.add_argument("-f",
                        "--force",
                        action='store_false',
                        help="overwrite output file")
    args = parser.parse_args()

    OUTPUT_FILE = args.output
    DATA_DIR = args.data_f
    LABEL_FILE = args.label_f
    SOURCE_IMAGES = os.path.join(DATA_DIR)

    images = glob(os.path.join(SOURCE_IMAGES, "*.jpg"))
    labels = pd.read_csv(LABEL_FILE, na_values='-')
    #np.savez("asdfasdf",labels.to_numpy())
    labels.to_hdf("labeldata.h5", "labels", mode="w")
    #reread = pd.read_hdf('./labeldata.h5')
    data = []

    print("d:{} lb:{} of:{} img:{}".format(args.data_f, args.label_f,
                                           OUTPUT_FILE, images))
    print("{}".format(labels.to_numpy()[0]))
    #if path.isfile(OUTPUT_FILE) and args.force:

    #  Image information and resizing
    NUM_IMAGES = len(images)
    HEIGHT = 3024 // 4
    WIDTH = 4032 // 4
    CHANNELS = 3
    SHAPE = (HEIGHT, WIDTH, CHANNELS)

    with h5py.File('imagedata.h5', 'w') as hf:

        for i, img in enumerate(tqdm(images)):
            image = io.imread(img)
            # skimage.transform.resize expects (rows, cols), i.e. (HEIGHT, WIDTH)
            image = resize(image, (HEIGHT, WIDTH), anti_aliasing=True)
            data.append(image)
            np.savez("progressivesave", images=data[i])

        #data.to_hdf("imagedata.h5", "images",mode="w")
        #want to save a new image to h5 one at a time like the npz
        # imgs = hf.create_dataset(
        #     name='images',
        #     data=data,
        #     #shape=(HEIGHT, WIDTH, CHANNELS),
        #     #maxshape=(HEIGHT, WIDTH, CHANNELS),
        #     compression="gzip",
        #     compression_opts=9)
    imgs = np.array(data)
    np.savez("dataimages", images=data)
Example #6
def fillNetData(idClass, elements, absClassPath, absdataDirPath):
    '''
		idClass: class id to identify each img in elements
		elements: list of images
		absClassPath: absolute path to the current images holder (class)
		absdataDirPath: target path to copy all images in elements

		returns a list of string pairs: absoluteImagePath idClass
	'''

    data = []
    for img in elements:
        # absolute path to the current image
        absFilePath = os.path.join(absClassPath, img)

        # path to caffe image holder
        targetPath = os.path.join(absdataDirPath, img) + '.jpg'

        # Save the image in the caffe image holder
        resizeAndSave(absFilePath, SIZE, SIZE, targetPath)

        # to copy the picture without resizing, uncomment the next line and comment out the previous one
        #shutil.copy(absFilePath, absdataDirPath)

        # add the image path and their class to train
        data.append(str(targetPath) + ' ' + str(idClass))

    return data
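A hedged usage sketch: lists like the one returned above are typically written to a Caffe-style list file, one "path label" pair per line. The train.txt filename and the argument values are placeholders.

# Hypothetical: dump the "absoluteImagePath idClass" pairs to a Caffe list file.
lines = fillNetData(0, elements, absClassPath, absdataDirPath)
with open('train.txt', 'w') as f:
    f.write('\n'.join(lines) + '\n')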
Example #7
def addArtificialData():
    print("here")
    baseName = os.path.basename(leftEyePath)
    print(baseName)
    data_dir = os.path.join(projectPath, baseName)
    print(data_dir)
    files = os.listdir(data_dir)
    files = [f for f in files if f.split('.')[-1] == 'txt']
    print(files)
    data = []
    for f in files:
        label = f.split('.')[0]
        filePath = os.path.join(data_dir,f)
        with open(filePath,'r') as r:
            for image in r:
                data.append(image.strip())
    #print data
    for f in data:
        parentDir = os.path.dirname(f)
        image_name = f.split('/')[-1].split('.')[0]
        scale_image_name = os.path.join(parentDir, image_name + '_s.jpg')
        rotate_image_name = os.path.join(parentDir, image_name + '_r.jpg')
        print(image_name)
        img = io.imread(f, as_grey=True)  # `as_gray` in newer scikit-image
        scale_image = rescale(img, 0.9)
        rotated_image = rotate(img, 5, resize=False)
        print(img.shape)
        print(scale_image.shape)
        print(rotated_image.shape)
        io.imsave(scale_image_name, scale_image)
        io.imsave(rotate_image_name, rotated_image)
        input()  # pause between images
Example #8
    def segment_data(self, frame, segments):
        data = []
        self.superpixel.fill(0)
        for i in range(1 + np.max(segments)):
            pts = np.where(segments == i)
            col = np.average(frame[pts[0], pts[1]], axis=0).astype(np.uint8)

            #center of contour
            self.tmp.fill(0)
            self.tmp[pts] = 255
            c = cv2.findContours(self.tmp, cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)[-2][0]
            M = cv2.moments(c)

            if abs(M["m00"]) > 1e-3:
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])

                self.superpixel[pts] = col

                data.append({
                    'idx': i,
                    'size': pts[0].size,  # number of pixels in the superpixel
                    'color': col,
                    'center': (cY, cX)
                })
        return data
Example #9
def draw_pic(rgb):
    data = []
    for i in range(rgb.shape[0]):
        for j in range(rgb.shape[1]):
            data.append(int(rgb[i][j]))
    plt.hist(data, bins=256, facecolor="blue", edgecolor="black", alpha=0.7)
    plt.show()
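The nested pixel loop can be replaced by one vectorized call; a sketch with the same output, assuming rgb is the same 2-D single-channel array:

# Equivalent, vectorized: flatten the image and histogram it directly.
plt.hist(rgb.ravel().astype(int), bins=256, facecolor="blue", edgecolor="black", alpha=0.7)
plt.show()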
Example #10
def populate_3d_graph(
    dummy2_children,
    show_hide_seg_3d,
    drawn_shapes_data,
    last_3d_scene,
    last_render_id,
    image_display_top_figure,
    image_display_side_figure,
):
    # extract which graph shown and the current render id
    graph_shown, current_render_id = dummy2_children.split(",")
    current_render_id = int(current_render_id)
    start_time = time.time()
    cbcontext = [p["prop_id"] for p in dash.callback_context.triggered][0]
    # check that we're not toggling the display of the 3D annotation
    if cbcontext != "show-hide-seg-3d.children":
        PRINT("might render 3D, current_id: %d, last_id: %d" %
              (current_render_id, last_render_id))
        if graph_shown != "3d shown" or current_render_id == last_render_id:
            if current_render_id == last_render_id:
                PRINT("not rendering 3D because it is up to date")
            return dash.no_update
    PRINT("rendering 3D")
    segs_ndarray = shapes_to_segs(
        drawn_shapes_data,
        image_display_top_figure,
        image_display_side_figure,
    ).transpose((1, 2, 0))
    # image, color
    images = [
        (img.transpose((1, 2, 0))[:, :, ::-1], "grey"),
    ]
    if show_hide_seg_3d == "show":
        images.append((segs_ndarray[:, :, ::-1], "purple"))
    data = []
    for im, color in images:
        im = image_utils.combine_last_dim(im)
        try:
            verts, faces, normals, values = measure.marching_cubes(im,
                                                                   0,
                                                                   step_size=3)
            x, y, z = verts.T
            i, j, k = faces.T
            data.append(
                go.Mesh3d(x=x,
                          y=y,
                          z=z,
                          color=color,
                          opacity=0.5,
                          i=i,
                          j=j,
                          k=k))
        except RuntimeError:
            continue
    fig = go.Figure(data=data)
    fig.update_layout(**last_3d_scene)
    end_time = time.time()
    PRINT("serverside 3D generation took: %f seconds" %
          (end_time - start_time, ))
    return (fig, current_render_id)
Example #11
def HOG_data_measurement(file_list):
    data = []
    for path in file_list:
        grey_img = load_image_and_pre_processing(path)
        # `visualise` was renamed `visualize` in newer scikit-image releases
        hog_data, hog_image = hog(grey_img, orientations=8, pixels_per_cell=(8, 8),
                                  cells_per_block=(1, 1), visualise=True)
        data.append(hog_data)
    return data
Example #12
def get_HOG_Features(trainingPath, testingPath, cell_size=16, bin_size=8):
    from hog import Hog_descriptor

    # initialize the local binary patterns descriptor along with the data and label lists
    data = []
    labels = []
    test_data = []
    test_labels = []

    start_time = time.time()
    # loop over the training images
    for imagePath in paths.list_files(trainingPath, validExts=(".png",".ppm")):
        # open image
        img = cv2.imread(imagePath)
        gray = np.matrix(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
        resized_image = cv2.resize(gray, (48, 48))

        # get hog features
        hog = Hog_descriptor(resized_image, cell_size=cell_size, bin_size=bin_size)
        vector = hog.extract()
        v = np.array(vector)

        # extract the label from the image path, then update the
        # label and data lists
        labels.append(int(imagePath.split("/")[-2]))
        data.append(vector)

    # loop over the testing images
    for imagePath in paths.list_files(testingPath, validExts=(".png",".ppm")):
        
        # open image
        img = cv2.imread(imagePath)
        gray = np.matrix(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
        resized_image = cv2.resize(gray, (48, 48))

        # get hog features
        hog = Hog_descriptor(resized_image, cell_size=cell_size, bin_size=bin_size)
        vector = hog.extract()

        # extract the label from the image path, then update the
        # label and data lists
        test_labels.append(int(imagePath.split("/")[-2]))
        test_data.append(vector)

    feature_extraction_runtime = (time.time() - start_time)

    data = np.array(data)
    labels = np.array(labels)
    test_data = np.array(test_data)
    test_labels = np.array(test_labels)

    print "[INFO] HOG Features are ready!"
    print "Total image: ", len(data) + len(test_data)
    print "Feature extraction runtime: ", feature_extraction_runtime
    print "Average for one image:", feature_extraction_runtime / (len(data) + len(test_data))


    return (data, labels, test_data, test_labels)
Example #13
def get_LBP_Features(trainingPath, testingPath ,p=24, r=8):
    from localbinarypatterns import LocalBinaryPatterns
    from sklearn.utils import shuffle

    # initialize the local binary patterns descriptor along with the data and label lists
    desc = LocalBinaryPatterns(p, r)
    data = []
    labels = []
    test_data = []
    test_labels = []

    start_time = time.time()
    # loop over the training images
    for imagePath in paths.list_files(trainingPath, validExts=(".png",".ppm")):
        
        # load the image, convert it to grayscale, and describe it
        image = cv2.imread(imagePath)
        gray = np.matrix(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
        resized_image = cv2.resize(gray, (32, 32))
        hist = desc.describe(resized_image)
        hist = hist / max(hist)

        # extract the label from the image path, then update the
        # label and data lists
        labels.append(int(imagePath.split("/")[-2]))
        data.append(hist)

    # loop over the testing images
    for imagePath in paths.list_files(testingPath, validExts=(".png",".ppm")):

        # load the image, convert it to grayscale, describe it, and classify it
        image = cv2.imread(imagePath)
        gray = np.matrix(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
        resized_image = cv2.resize(gray, (32, 32))
        hist = desc.describe(resized_image)
        hist = hist / max(hist)

        # extract the label from the image path, then update the
        # label and data lists
        test_labels.append(int(imagePath.split("/")[-2]))
        test_data.append(hist)

    feature_extraction_runtime = (time.time() - start_time)

    data = np.array(data)
    labels = np.array(labels)
    #test_data = np.array(test_data)
    #test_labels = np.array(test_labels)

    data, labels = shuffle(data,labels)

    print "[INFO] LBP Features are ready!"
    print "Total image:", len(data) + len(test_data)
    print "Feature extraction runtime:", feature_extraction_runtime
    print "Average for one image:", feature_extraction_runtime / (len(data) + len(test_data))

    return (data, labels, test_data, test_labels)
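A hedged usage sketch for the tuple returned above, assuming scikit-learn; LinearSVC and the directory paths are illustrative placeholders, not the original author's choices.

# Hypothetical: train and score a linear SVM on the LBP histograms.
from sklearn.svm import LinearSVC

data, labels, test_data, test_labels = get_LBP_Features("train/", "test/")
model = LinearSVC().fit(data, labels)
print("accuracy:", model.score(test_data, test_labels))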
Example #14
def doPreProcessing(images, path):
    data = []
    sizeOfData = len(images)
    for i, image in enumerate(images):
        img = cv2.imread(path + image, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (IMG_SIZE, IMG_SIZE), interpolation=cv2.INTER_LINEAR)
        # describe the resized image (not the filename) and keep the flat HOG vector
        imgData, _ = hog(img, orientations=8, pixels_per_cell=(10, 10), cells_per_block=(2, 2),
                         visualize=True, block_norm='L2')
        data.append(imgData.tolist())
        updateProgress(i, sizeOfData)
    return np.array(data)
Example #15
def train_many_svm(hog, data, labels):
    # compute a HOG descriptor for every input image
    data = [calculate_hog(hog, img) for img in data]

    train_data, eval_data, train_labels, eval_labels = train_test_split(data, labels, test_size=0.2, shuffle=True)
    scores = []

    cList = (0.1, 0.5, 1, 2, 4, 6, 8, 10, 100, 200)
    X = np.arange(0.05, 2, 0.1)
    tolList = (1e-4, 1e-3, 1e-2, 0.1, 1, 10)
    Y = np.arange(0.05, 2, 0.1)
    max_score = 0
    max_params = ""
    print ("Number of training samples: ", len(train_data), "\nNumber of samples for evaluation: ", len(eval_data))
    for param1 in X.tolist():
        for param2 in Y.tolist(): 
            start = time.time()
            clf = svm.SVC(C=param1, tol=param2, cache_size=100, kernel='rbf')
            data_scaler = StandardScaler(copy=True, with_mean=True, with_std=True)
            data_scalerTransf = data_scaler.fit_transform(train_data)
            clf.fit(data_scalerTransf,train_labels)

            numOk = 0
            eval_data_transform = []
            for j in range (len(eval_data)):
                trans_data = data_scaler.transform(eval_data[j].reshape(1,-1))
                eval_data_transform.append(trans_data)
                prediction = clf.predict(trans_data)
                if (prediction == eval_labels[j]): numOk += 1
            score2 = clf.score(eval_data_transform, eval_labels)
            finish = time.time()
            score = np.around(numOk/len(eval_data)*100,2)
            if score > max_score:
                max_params = 'Score: '+ str(score), " %, param1 = ", str(param1), "param2 = ", str(param2)
                max_score = score
            scores.append(score)
            print ('Score: ', score, " %, param1 = ", param1, "param2 = ", param2)
            print ('Time: ', finish-start)
    print (max_params)
    X, Y = np.meshgrid(X,Y)
    Z = np.asarray(scores).reshape(X.shape[0], Y.shape[1])

    fig = plt.figure()
    ax = Axes3D(fig)
    ax.set_xlabel('C')
    ax.set_ylabel('tol')
    ax.set_zlabel('result')
    ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.viridis)

    plt.show()

    return clf, data_scaler
Example #16
def preprocess_frames(frames, numbers, process_list, ht, wd, channels, ROI_array=None):
    data = []
    for i, x in enumerate(frames):
        #print(x, i)
        img = None
        # First, load the image
        if channels == 1:
            img = cv2.imread(x, 0)  # Use this for RGB to GS #TODO change this based on dset
            img = img.reshape(img.shape[0], img.shape[1], 1)

            # print(img.shape)
        elif channels == 3:
            # print("Thermal pose IUV")
            img = cv2.imread(x, 1)  # 1 for color images

        #resize
        # img=img.reshape(ht,wd,3)
        if 'ROI_frame' in process_list:
            box=ROI_array[i,:]
            left, top, right, bottom=int(box[1]),int(box[0]),int(box[3]),int(box[2])
            # Pixels outside the ROI are assigned -1, matching the GAN input range [-1, 1]
            out=-np.ones(shape=img.shape,dtype='float32')
            patch=img[top:bottom,left:right,:]
            #Normalizing only inside ROI
            if 'Processed' in process_list:
                mean=np.mean(patch,axis=(0,1))
                patch=patch-mean
                patch=patch/ 255.0

            out[top:bottom,left:right,:]=patch
            img=cv2.resize(out,(ht,wd))
            img=img.reshape(ht,wd,channels)#resize on may remove the channels dis

        elif 'Processed' in process_list:
            img=cv2.resize(img,(ht,wd))

            img=img.reshape(ht,wd,channels)

            img=img-np.mean(img,axis=(0,1))#Mean centering
            img=img.astype('float32') / 255. #normalize
        else:
            img=cv2.resize(img,(ht,wd))
            img=img.reshape(ht,wd,channels)

        data.append(img)


    #data = data.reshape((len(data), np.prod(data.shape[1:]))) #Flatten the images
    data=np.array(data)

    print('data.shape', data.shape)

    # print(numbers)
    return data,numbers,frames
Example #17
def load_set(folder, shuffle=False):
    img_list = sorted(glob.glob(os.path.join(folder, '*.png')) + \
                      glob.glob(os.path.join(folder, '*.jpg')) + \
                      glob.glob(os.path.join(folder, '*.jpeg')))
    if shuffle:
        np.random.shuffle(img_list)
    data = []
    filenames = []
    for img_fn in img_list:
        img = load_image(img_fn)
        data.append(img)
        filenames.append(img_fn)
    return data, filenames
Example #18
def generate_(data_range):
    '''
    generate a data point of the form [Fx, Fy], with both coordinates chosen randomly from @data_range
    '''
    data = []
    x = np.random.choice(data_range, 1)
    x = x[0]
    y = np.random.choice(data_range, 1)
    y = y[0]
    # y=-x
    data.append(float(x))
    data.append(float(y))
    return data
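The two draws can be collapsed into one vectorized call; a sketch with the same behavior (both coordinates sampled independently, with replacement):

# Equivalent, vectorized: draw both coordinates at once.
data = [float(v) for v in np.random.choice(data_range, 2)]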
Example #19
def loadData(parentDir):
    '''From the parentDir get the dir name where .txt files are stored.
    This function will return data and the labels associated with it'''
    baseName = os.path.basename(parentDir)
    data_dir = os.path.join(projectPath,baseName)
    files = os.listdir(data_dir)
    files = [f for f in files if f.split('.')[-1]=='txt']
    data = []
    for f in files:
        label = f.split('.')[0]
        filePath = os.path.join(data_dir,f)
        with open(filePath,'r') as r:
            for image in r:
                data.append([image.strip(),label])
    return data
Example #20
    def extract_info(self, img, index, perc):
        data = []
        #print(img[0])
        for i in range(0, index):
            data.append(img[i // self.x][i % self.x])
        b = index // 8
        with open('lsb_data/lsb_perc{}_data.txt'.format(perc), "wb") as f:
            for i in range(0, b):
                tmp = data[i * 8: i * 8 + 8]
                t = 0
                n = 7
                for j in tmp:
                    t += (j % 2) * 2 ** n
                    n -= 1
                f.write(bytes([t]))
Example #21
def dataImage(request):
    data = []
    for x in range(1, 451):
        data.append("/static/data/a (" + str(x) + ").JPG")
    paginator = Paginator(data, 15)
    page_number = request.GET.get('page')
    page_obj = paginator.get_page(page_number)

    if page_number is None:
        no = 0
    else:
        no = int(page_number) * 15 - 15

    context = {'data': page_obj, 'no': no, 'page': page_number}
    return render(request, 'dataImage.html', context)
Example #22
def load_data(result):
    data = []
    n, m = result.shape  # image size
    imap = np.zeros((m, n))
    black = 0
    for i in range(m):
        for j in range(n):  # scale each pixel value into the 0-1 range and store it in data
            point = result[j][i]
            if not point == black:
                x = result[j][i]
                #y = result[j][i][1]
                #z = result[j][i][2]
                imap[i][j] = 1
                data.append([x / 255.0])
    return data, m, n, imap  # return data as a matrix, along with the image size
Example #23
def collectImages_for_dataset1(letter, freq):
    speakers = ['A', 'B', 'C', 'D', 'E']
    path = 'dataset1/'
    data = []
    for s in speakers:
        image_path = path + s + '/' + letter + '/'
        names = glob.glob(image_path + "*.*")
        for i in range(0, len(names), freq):
            name = names[i]
            image = cv2.imread(name)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            image = convert_size_for_dataset1(image)
            #plt.imshow(clear_background_rough(image))
            data.append(image)
    return data
Example #24
    def searchForImage(
        self,
        folderPath,
        fileName,
        functionOfSimilarity,
        scale=1.0,
    ):
        queryImage = LabImage(io.imread(folderPath + fileName), fileName)
        queryImage.featuresValues = self.featuresExtractor(queryImage.img)
        orderedIDs = [i for i in range(0, len(self.images))]
        similarities = [
            functionOfSimilarity(queryImage.featuresValues,
                                 image.featuresValues) for image in self.images
        ]

        tmp = list(zip(orderedIDs, similarities))
        tmp.sort(key=lambda x: x[1], reverse=True)
        orderedIDs, similarities = zip(*tmp)
        ### DISPLAY QUERY
        figure(figsize=(5.0 * scale, 5.0 * scale), dpi=80)
        subplot(1, 1, 1)
        plt.imshow(queryImage.img)
        plt.show()
        print(queryImage.ID)
        #features
        data = []
        for j in range(0, len(self.featuresNames)):
            if isinstance(queryImage.featuresValues, np.ndarray):
                fV = []
                fV.append("- " + self.featuresNames[j])
                fV.append("NaN")
                data.append(fV)
            else:
                fV = []
                fV.append("- " + self.featuresNames[j])
                fV.append("{:10.2f}".format(queryImage.featuresValues[j]))
                data.append(fV)

        col_width = max(len(word) for row in data
                        for word in row) + 5  # PADDING
        for row in data:
            print("".join(word.ljust(col_width) for word in row))
        ### DISPLAY RESULTS
        self.display(scale=scale,
                     orderedIDs=orderedIDs,
                     similarities=similarities)
Example #25
def get_text(text, filename, size):
    face = Face(filename)
    face.set_char_size(POINT_SIZE(size))
    pen = FT_Vector(0,0)
    flags = FT_LOAD_RENDER
    xmin,xmax,ymin,ymax = get_text_extents(text, filename, size)
    L = np.zeros((ymax-ymin, xmax-xmin),dtype=np.ubyte)
    previous = 0
    pen.x, pen.y = 0, 0
    import sys
    for c in text:
        sys.stdout.flush()
        face.load_char(c, flags)
        kerning = face.get_kerning(previous, c)
        previous = c
        bitmap = face.glyph.bitmap
        pitch  = face.glyph.bitmap.pitch
        width  = face.glyph.bitmap.width
        rows   = face.glyph.bitmap.rows
        top    = face.glyph.bitmap_top
        left   = face.glyph.bitmap_left
        pen.x += kerning.x
        x = (pen.x >> 6) - xmin + left
        y = (pen.y >> 6) - ymin - (rows - top)
        data = []
        d = bitmap.buffer[:]
        for j in range(rows):
            data.append(d[j*pitch:j*pitch+width])
        if len(data):
            Z = np.array(data, dtype=np.ubyte).reshape(rows, width)
            L[y:y+rows,x:x+width] |= Z[::-1,::1]
        pen.x += face.glyph.advance.x
        pen.y += face.glyph.advance.y

    # Create a 10 pixel border for the image    
    L = np.flipud(L)
    rows, cols = L.shape
    row = 10*[[0]*cols]
    L = np.vstack((row, L, row))

    rows, cols = L.shape
    col = np.array([[0]*10]*rows)
#    print col.shape
    L = np.hstack((col, L, col))
    return L
Example #26
def trainData():
    print('training Started...')
    data = []
    labels = []
    count = 0
    dataFile = open('data.txt', 'w')
    labelFile = open('labels.txt', 'w')

    for car in vehicleList[:40000]:
        # extract the make of the car
        make = car.get('carType')
        imagePath = car.get('imagePath')

        # load the image, convert it to grayscale, and detect edges
        image = cv2.imread(imagePath)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        edged = imutils.auto_canny(gray)

        # find contours in the edge map, keeping only the largest one, which
        # is presumed to be the car logo
        cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        c = max(cnts, key=cv2.contourArea)

        # extract the logo of the car and resize it to a canonical width
        # and height
        (x, y, w, h) = cv2.boundingRect(c)
        logo = gray[y:y + h, x:x + w]
        logo = cv2.resize(logo, (200, 100))

        # extract Histogram of Oriented Gradients from the logo
        H = feature.hog(logo, orientations=9, pixels_per_cell=(10, 10),
                        cells_per_block=(2, 2), transform_sqrt=True, block_norm="L1")

        # update the data and labels
        data.append(H)
        dataFile.write(str(H))
        dataFile.write("\n")
        labels.append(make)
        labelFile.write(str(make))
        labelFile.write("\n")

        count += 1
    return data, labels
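The version check above is what imutils.grab_contours wraps; newer imutils releases provide this helper, so the same unpacking can be written version-proof:

# Version-proof contour unpacking across OpenCV 2/3/4 return shapes.
cnts = imutils.grab_contours(
    cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))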
Example #27
def load_all_image_by_dir(path, conf):

    start = t.time()
    data = []
    label = []
    file_list = os.listdir(path)
    debug_count = 0

    for file in file_list:
        # debug_count += 1
        # if debug_count > 100: break  # for debugging, to avoid the long image-loading time

        if (file.find(".jpg") == -1 and file.find(".jpeg") == -1
                and file.find(".png") == -1 and file.find(".bmp") == -1
                and file.find(".gif") == -1):
            continue

        file_name = file.split(".")[0]
        img_data = cv2.imread(path + file)
        one_img = preprocess_image(img_data, file_name, conf)
        if (one_img is None):
            logger.debug("处理图像失败%s,处理下一张", file)
            continue

        try:
            v = label_process.label2vector(file_name, conf)
            label.append(v)
        except Exception as e:
            import traceback
            traceback.print_exc()
            logger.error("忽略此文件%s,原因:%r", file_name, str(e))
            continue

        data.append(one_img)

    # stack the arrays together into a [20000, 100, 36, 1] tensor
    # print(len(data))
    image_data = np.vstack(data)
    label_data = np.vstack(label)

    logger.info("images data loaded:%r", image_data.shape)
    end = t.time()
    logger.info("加载图像使用了%d秒...", (end - start))

    return image_data, label_data
Example #28
def gen_dataset(n=df.shape[0]):
    data = []
    labels = []
    for i in range(1, n):
        crop, success = getcrop(i)
        if (success):
            data.append(crop)
            labels.append(df[2][i])
        else:
            if (enable_error_output):
                print("[WARNING] Template matching has failed for image: " +
                      str(i))
        print_percentage((i * 100 / (n - 1)), "Fetched " + str(i) + " images:")

    print_percentage(100, "Fetched " + str(n - 1) + " images:")
    print("")
    print("Finished!")
    return data, labels
Example #29
def rr_img(img):
    '''Rotate the image and remove outlier points'''
    debug = False
    img = loose.utils.get_rotate_img(img)
    if debug:
        cv.imshow('src_img', img)
    # compute the x coordinate of the dropper in the image
    if len(img) > 0:
        h, w = img.shape
        center = []
        for j in range(h):
            row = []
            for i in range(w):
                if img[j][i] == 255:
                    row.append(i)
            if len(row) > 0:
                center.append(np.mean(row))
        center_x = int(np.mean(center))  # np.int is removed in newer NumPy

        # keep only a band of `blank` pixels on either side of the dropper
        blank = 20
        for j in range(h):
            for i in range(0, center_x - blank):
                img[j][i] = 0
            for i in range(center_x + blank, w):
                img[j][i] = 0

        if debug: cv.imshow('blank', img)
        # remove scattered outlier points
        result = img > 1
        result = morphology.remove_small_objects(result, min_size=20, connectivity=1)

        # convert the boolean image back to a regular grayscale image
        data = []
        nres = np.zeros(result.shape, dtype=np.uint8)
        for i in range(result.shape[0]):
            for j in range(result.shape[1]):
                if result[i][j] == False:
                    nres[i][j] = 0
                else:
                    nres[i][j] = 255
                    data.append([i, j])
        if debug: cv.imshow('rm oulier', nres)
        return nres
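The boolean-to-grayscale loop at the end (and the identical one in Example #32 below) can be vectorized; a sketch producing the same nres and data:

# Equivalent, vectorized: scale the boolean mask and collect foreground coordinates.
nres = result.astype(np.uint8) * 255
data = np.argwhere(result).tolist()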
Example #30
def get_arrowheads(img, lines):
    img = rgb2grey(img)
    img = skimage.img_as_ubyte(img)

    fig, ax = plt.subplots()
    fig.tight_layout()

    data = []
    relevant = []
    id_counter = 0

    # examine the endpoints of all lines
    for l in lines:
        start, end = l[1], l[2]
        dst_start, dst_end, index_start, index_end = has_arrowhead(
            l[0], start, end, img, id_counter)
        if (dst_start != -1) and (dst_end != -1):
            data.append(
                (dst_start, dst_end, start, end, index_start, index_end, l[0]))
        id_counter = index_end

    # sort arrowhead pairs by total distance to the template arrowhead
    data = [
        i for i in data
        if (i[0] > ARROW_THRESHOLD) and (i[1] > ARROW_THRESHOLD)
    ]
    #data = sorted(data, key=lambda x: x[0] + x[1], reverse=True)
    #data = data[0:10]

    # restrict the selection to the arrowhead pairs closest to the template arrowhead

    for d in data:
        # draw the bounding boxes of the arrowheads
        draw_bounding_box(ax, img, d[2], d[3], d[4], d[5], d[6])
        relevant.append((d[2][1], d[2][0]))
        relevant.append((d[3][1], d[3][0]))

    #print(data)
    print(len(data))
    ax.imshow(img, interpolation='bicubic', cmap=plt.cm.gray)
    #plt.show()

    return relevant
Example #31
def load_filenames_process_and_save_results(filename, flag_use_skimage_version):
    # load samples filenames
    with open("data\\samples_filenames\\" + filename + "_filenames.pickle", "rb") as f:
        samples_filenames = pickle.load(f)

    data = []
    for sample_filename in samples_filenames:
        single_data = HOG_function(sample_filename, flag_use_skimage_version)
        data.append(single_data)

    print(len(data))
    if flag_use_skimage_version:
        output_filename_modifier = ""
    else:
        output_filename_modifier = "_modified"
    with open("data\\" + filename + output_filename_modifier + ".pickle", "wb") as f:
        pickle.dump(data, f)

    return data
Example #32
def line_fit(frame):
    '''
    Get the skeleton image after distance transform, horizontal projection and vertical projection
    @param frame: cv.img, color
    @return:
    nres: skeleton image, gray
    data: foreground pixel coordinates in the image
    '''
    if len(frame.shape)==2:
        frame = cv.cvtColor(frame,cv.COLOR_GRAY2BGR)

    # binarize the image
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    gray = cv.GaussianBlur(gray,(5,5),0)
    ret, binary = cv.threshold(gray, 3, 255, cv.THRESH_BINARY)

    # distance transform and projections
    result,xpts,ypts = projection(binary)

    # if a row contains more than one foreground pixel, zero out the whole row
    cnt = np.sum(result,-1)
    idx = np.where(cnt>255)
    result[idx, :] = 0
    space = 15
    result = result[space:-space,space:-space]

    ## remove scattered outlier points
    result = result >1
    result = morphology.remove_small_objects(result, min_size=2, connectivity=1)

    ## convert the boolean image back to a grayscale image
    data = []
    nres = np.zeros(result.shape,dtype=np.uint8)
    for i in range(result.shape[0]):
        for j in range(result.shape[1]):
            if result[i][j] == False:
                nres[i][j]=0
            else:
                nres[i][j]=255
                data.append([i,j])

    return nres,data
Example #33
		croppedImage = currentImage[(sizeOfImage[0]-sizeOfNewImage[0])//2:(sizeOfImage[0]+sizeOfNewImage[0])//2, (sizeOfImage[1]-sizeOfNewImage[1])//2:(sizeOfImage[1]+sizeOfNewImage[1])//2]
		flippedImageH = flipImageHorizontally(croppedImage)
		flippedImageV = flipImageVertically(croppedImage)
		currentFolder.append(resizedImage)
		currentFolderLabels.append(index)
		currentFolder.append(croppedImage)
		currentFolderLabels.append(index)
		currentFolder.append(flippedImageH)
		currentFolderLabels.append(index)
		currentFolder.append(flippedImageV)
		currentFolderLabels.append(index)
		if laplaceImages:
			currentLaplaceImage = scImage.laplace(currentImage)
			currentFolder.append(currentLaplaceImage)
			currentFolderLabels.append(index)
	data.append(currentFolder)
	labels.append(currentFolderLabels)
	index = index + 1
print("DATA READING DONE")
index = 0
for i in labels:
	labels[index] = np_utils.to_categorical(i, 15)
	index = index + 1
print("LABELS WERE CHANGED")

#MAKE TRAINING AND TEST
Xtraining = []
Ytraining = []
#Xtest = []
#Ytest = []
index = 0
Example #34
import matplotlib.pyplot as plt

from skimage.feature import hog
from skimage import data, color, exposure
from scipy import misc
from PIL import Image
from resizeimage import resizeimage
from sklearn import svm
from sklearn.cross_validation import KFold

f = open('/Users/yanan/caffe/data/vehicle/train.txt')
data = []
label = []
feature = []
for line in f:
    data.append(line.split(' ')[0])
    label.append(line.split(' ')[1].strip())

for d in data:
    image = misc.imread(d)
    fd, hog_image = hog(image, orientations=8, pixels_per_cell=(4, 4),
                    cells_per_block=(1, 1), visualise=True)
    feature.append(fd)

kf = KFold(len(data), n_folds=10, shuffle=True)
for train_index, test_index in kf:
    tmp_feature = [feature[x] for x in train_index]
    tmp_label = [label[x] for x in train_index]
    test_feature = [feature[x] for x in test_index]
    test_label = [label[x] for x in test_index]
    clf = svm.SVC()
    clf.fit(tmp_feature, tmp_label)
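Note this example targets an old scikit-learn: sklearn.cross_validation and the KFold(n, n_folds=...) constructor were removed long ago. A sketch of the modern equivalent, assuming a recent scikit-learn:

# Modern API: KFold lives in sklearn.model_selection and splits via .split().
from sklearn.model_selection import KFold

kf = KFold(n_splits=10, shuffle=True)
for train_index, test_index in kf.split(feature):
    clf = svm.SVC()
    clf.fit([feature[x] for x in train_index], [label[x] for x in train_index])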
Example #35
def proj3_22b():
    all_channels = []

    for channel in range(3):
        bg = plt.imread("data/trumpsou.jpeg") / 255
        #bg = exposure.equalize_hist(bg)
        bg = np.clip(bg + .2 , 0, 1)
        bg = bg[...,channel]

        source = ndimage.rotate(plt.imread("data/kavi.jpg"), -90) / 255
        source = exposure.equalize_hist(source)
        source = np.clip(source + .2, 0, 1)
        source = source[...,channel]

        #plt.imshow(bg, cmap="gray")
        #plt.show()
        #plt.imshow(source, cmap="gray")
        #plt.show()

        # How to trim the input image
        sx1 = 0                 # sx1 = 20
        sx2 = source.shape[0]   # sx2 = 450
        sy1 = 0                 # sy1 = 50
        sy2 = source.shape[1]   # sy2 = 360

        # How much to scale the input image down
        #K = .052
        K = .04

        # Top left corner on the source image in bg image frame
        #full_bgx1 = 100
        full_bgx1 = 135
        #full_bgy1 = 450
        full_bgy1 = 130

        # Corresponding bottom right corner
        full_bgx2 = int(full_bgx1 + (sx2 - sx1) * K)
        full_bgy2 = int(full_bgy1 + (sy2 - sy1) * K)

        bgx1 = full_bgx1 + 10
        bgy1 = full_bgy1 + 20
        bgx2 = full_bgx2 - 5
        bgy2 = full_bgy2 - 2


        #penguin = source[sx1:sx2,sy1:sy2]
        penguin = source
        penguin = imresize(penguin, (full_bgx2 - full_bgx1, full_bgy2 - full_bgy1)) / 255

        # print("AHHHHH", full_bgx1, full_bgx2, full_bgy1, full_bgy2)
        # print("SOURCE: ", penguin.shape)
        # print("BG: ", bg.shape)

        bg_with_full_source = bg.copy()
        bg_with_full_source[full_bgx1:full_bgx2, full_bgy1:full_bgy2] = penguin
        plt.imshow(bg_with_full_source, cmap="gray")
        plt.show()
        scipy.misc.imsave("output/final_original_kavitrump.jpg", bg_with_full_source)
        #exit(0)
        #plt.imshow(bg, cmap="gray")
        #plt.show()


        #exit(0)

        #plt.imshow(penguin)
        #plt.show()

        #bg[bgx1:bgx2, bgy1:bgy2] = penguin
        #plt.imshow(bg, cmap="gray")
        #plt.show()

        # exit(0)

        row_num = 0
        row = []
        col = []
        data = []
        b = []

        # where we're traversing
        start = (bgx1, bgy1)
        end = (bgx2, bgy2)
        source_shape = (bgx2 - bgx1, bgy2 - bgy1)



        for i in range(bgx1, bgx2):
            for j in range(bgy1, bgy2):
                # CHECK DOWN (i + 1)
                if i < bgx2 - 1:
                    idx1 = get_idx(i, j, start, end)
                    idx2 = get_idx(i + 1, j, start, end)

                    row.append(row_num)
                    col.append(idx1)
                    data.append(1)

                    row.append(row_num)
                    col.append(idx2)
                    data.append(-1)

                    b.append(bg_with_full_source[i,j] - bg_with_full_source[i+1, j])
                    row_num += 1
                # if it's on the very last row, get the boundary i+1 equation
                elif i == bgx2 - 1:
                    idx1 = get_idx(i, j, start, end)

                    row.append(row_num)
                    col.append(idx1)
                    data.append(1)

                    b.append(bg_with_full_source[i,j] - bg_with_full_source[i+1, j] + bg[i+1,j])
                    row_num += 1

                # CHECK RIGHT (j + 1)
                if j < bgy2 - 1:
                    idx1 = get_idx(i, j, start, end)
                    idx2 = get_idx(i, j + 1, start, end)
                    row.append(row_num)
                    col.append(idx1)
                    data.append(1)

                    row.append(row_num)
                    col.append(idx2)
                    data.append(-1)

                    b.append(bg_with_full_source[i,j] - bg_with_full_source[i, j+1])
                    row_num += 1
                elif j == bgy2 - 1:
                    idx1 = get_idx(i, j, start, end)

                    row.append(row_num)
                    col.append(idx1)
                    data.append(1)

                    b.append(bg_with_full_source[i,j] - bg_with_full_source[i, j+1] + bg[i,j+1])
                    row_num += 1

                # CHECK UP (i - 1)
                if i > bgx1:
                    idx1 = get_idx(i, j, start, end)
                    idx2 = get_idx(i - 1, j, start, end)

                    row.append(row_num)
                    col.append(idx1)
                    data.append(1)

                    row.append(row_num)
                    col.append(idx2)
                    data.append(-1)

                    b.append(bg_with_full_source[i,j] - bg_with_full_source[i-1, j])
                    row_num += 1
                # if it's on the very first row, get the boundary i-1 equation
                elif i == bgx1:
                    idx1 = get_idx(i, j, start, end)

                    row.append(row_num)
                    col.append(idx1)
                    data.append(1)

                    b.append(bg_with_full_source[i,j] - bg_with_full_source[i-1, j] + bg[i-1,j])
                    row_num += 1

                # CHECK LEFT (j - 1)
                if j > bgy1:
                    idx1 = get_idx(i, j, start, end)
                    idx2 = get_idx(i, j-1, start, end)

                    row.append(row_num)
                    col.append(idx1)
                    data.append(1)

                    row.append(row_num)
                    col.append(idx2)
                    data.append(-1)

                    b.append(bg_with_full_source[i,j] - bg_with_full_source[i, j-1])
                    row_num += 1
                # if it's on the very first column, get the boundary j-1 equation
                elif j == bgy1:
                    idx1 = get_idx(i, j, start, end)

                    row.append(row_num)
                    col.append(idx1)
                    data.append(1)

                    b.append(bg_with_full_source[i,j] - bg_with_full_source[i, j-1] + bg[i,j-1])
                    row_num += 1





        row, col, data = np.array(row), np.array(col), np.array(data)

        A = csr_matrix((data, (row, col)))
        b = np.array([b]).T
        print("Constructed A matrix of shape: ", A.shape)
        print("Constructed b matrix of shape: ", b.shape)

        x = la.lsqr(A, b)
        y = x[0]
        print("Pixel values found: ", y.shape)
        final_source = np.reshape(y, source_shape)
        #plt.imshow(final_source, cmap="gray")
        #plt.show()

        bg_channel = bg.copy()
        bg_channel[bgx1:bgx2, bgy1:bgy2] = final_source
        #plt.imshow(bg_channel, cmap="gray")
        #plt.show()

        all_channels.append(bg_channel)

    final_im = np.empty((all_channels[0].shape[0], all_channels[0].shape[1], 3))
    for channel in range(3):
        final_im[:,:,channel] = np.clip(all_channels[channel], 0, 1)

    plt.imshow(final_im)
    plt.show()
    scipy.misc.imsave("output/final_kavitrump.jpg", final_im)
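get_idx is not defined in this snippet; from its usage it maps a pixel (i, j) inside the [start, end) box to a flat unknown index, consistent with the final np.reshape(y, source_shape). A hypothetical sketch under that row-major assumption:

def get_idx(i, j, start, end):
    # Hypothetical helper: row-major index of pixel (i, j) within the box
    # spanned by start=(x1, y1) and end=(x2, y2).
    width = end[1] - start[1]
    return (i - start[0]) * width + (j - start[1])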
Example #36
def train_ai():
			
			data = []
			classID = []
			features = []
			features_temp_array = []
			
			'''
			#SIMPLECV
			#bows
			feature_extractors = []
			extractor_names = []
			# Training data set paths for classification(suppervised learnning)
			image_dirs = ['../coin_images/jheads/',
						  '../coin_images/jtails/',
						  '../coin_images/oheads/',
						  '../coin_images/otails/',
						  ]
			# Different class labels for multi class classification
			class_names = ['jhead','jtail','ohead', 'otail']
			
			
			#preprocess all training images
			for directory in image_dirs:
				for filename in glob.glob(directory + '/*.jpg'):
					print "Processing:", filename
					img = cv2.imread(filename)
					temp_img  = preprocess_houghlines (img, 100)
					temp_str = filename.rsplit('/')
					temp_str = temp_str[len(temp_str)-1]
					temp_str = directory + '/temp/' + temp_str
					print temp_str
					cv2.imwrite(temp_str, temp_img)
					#raw_input('press enter to continue : ')
			#sys.exit(-1)
			
			
			#build array of directories for bow
			#image_dirs2 = []
			#for directory in image_dirs:
			#	image_dirs2.append(directory + '/temp/')
			#print image_dirs2

			# Different class labels for multi class classification
			extractor_name = 'hue'
			if extractor_name == 'bow':
				feature_extractor = BOFFeatureExtractor() # feature extrator for bag of words methodology
				feature_extractor.generate(image_dirs,imgs_per_dir=40) # code book generation
			elif extractor_name == 'hue':
				feature_extractor = HueHistogramFeatureExtractor()
			elif extractor_name == 'morphology':
				feature_extractor = MorphologyFeatureExtractor()
			elif extractor_name == 'haar':
				feature_extractor = HaarLikeFeatureExtractor()
			elif extractor_name == 'edge':
				feature_extractor = EdgeHistogramFeatureExtractor()
			image_dirs2 = image_dirs
			#bow_features = BOFFeatureExtractor()
			#bow_features.generate(image_dirs2,imgs_per_dir=40, verbose=True) # code book generation
			#bow_features.generate(image_dirs2,imgs_per_dir=200,numcodes=256,sz=(11,11),img_layout=(16,16),padding=4 )
			#bow_features.save('codebook.png','bow.txt')

			#print "extractor_names:", extractor_names, feature_extractors
			# initializing classifier with appropriate feature extractors list
			#print type(bow_features), bow_features, bow_features.getFieldNames(), bow_features.getNumFields()
			#raw_input('bow saved...Enter : ')
			#bow_features = None
			
			#bow_features = BOFFeatureExtractor()
			#print type(bow_features), bow_features, bow_features.getFieldNames(), bow_features.getNumFields()
			#bow_features.load('bow.txt')
			#print type(bow_features), bow_features, bow_features.getFieldNames(), bow_features.getNumFields()
			feature_extractors.append(feature_extractor)
			#raw_input('bow loaded Enter : ')

			#extractor_names.append(extractor_name)
			
			classifier_name = 'naive'
			if classifier_name == 'naive':
				classifier = NaiveBayesClassifier(feature_extractors)
			elif classifier_name == 'svm':
				classifier = SVMClassifier(feature_extractors)
			elif classifier_name == 'knn':
				classifier = KNNClassifier(feature_extractors, 2)
			elif classifier_name == 'tree':
				classifier = TreeClassifier(feature_extractors)

			# train the classifier to generate hypothesis function for classification
			#print "image_dirs:", image_dirs, class_names
			classifier.train(image_dirs2,class_names,disp=None,savedata='features.txt',verbose=True)
			
			print 'classifier:', type(classifier), classifier
			raw_input('press enter to continue :')
			#pickle.dump( classifier, open( "coinvision_ai_model2.mdl", "wb" ),2 )
			#classifier.save('coinvision_ai_model.mdl')
			print 'classifier:', type(classifier), classifier
			#classifier = NaiveBayesClassifier.load('coinvision_ai_model.mdl')

			#raw_input('press enter to continue : let me try loading bow file')
			#classifier2 = NaiveBayesClassifier.load('coinvision_ai_model.mdl')
			#classifier2.setFeatureExtractors(feature_extractors)
			#print 'classifier2:', type(classifier2), classifier2
			#classifier.load("coinvision_ai_model.mdl")
			#classifier2.load('coinvision_ai_model.mdl')
			#print 'classifier:', type(classifier2), classifier2
			raw_input('press enter to continue : ')
			print 'testing ai:'
			test_images_path = "../coin_images/unclassified"
			extension = "*.jpg"

			if not test_images_path:
				path = os.getcwd() #get the current directory
			else:
				path = test_images_path

			directory = os.path.join(path, extension)
			files = glob.glob(directory)

			count = 0 # counting the total number of training images
			error = 0 # conuting the total number of misclassification by the trained classifier
			for image_file in files:
				new_image = Image(image_file)
				category = classifier.classify(new_image)
				print "image_file:", image_file + "     classified as: " + category
				if image_file[-9] == 't':
					if category == 'jhead' or category == 'ohead':
						print "INCORRECT CLASSIFICATION"
						error += 1
				if image_file[-9] == 'h':
					if category == 'jtail' or category == 'otail':
						print "INCORRECT CLASSIFICATION"
						error += 1
				count += 1
			# reporting the results
			print ' * classifier : ', classifier
			print ' * extractors :', extractor_names
			print ' *', error, 'errors out of', count
			raw_input('edned press enter to continue : ')
			return
			'''
		#try: 
			data_filename = 'coinvision_feature_data.csv'
			print('reading features and classID: ', data_filename)
			f_handle = open(data_filename, 'r')
			reader = csv.reader(f_handle)
			#read data from file into arrays
			for row in reader:
				data.append(row)

			for row in range(0, len(data)):
				#print features[row][1]
				classID.append(int(data[row][0]))
				features_temp_array.append(data[row][1].split(" "))

			#removes ending element which is a space
			for x in range(len(features_temp_array)):
				features_temp_array[x].pop()
				features_temp_array[x].pop(0)

			#convert all strings in array to numbers
			temp_array = []
			for x in range(len(features_temp_array)):
				temp_array = [ float(s) for s in features_temp_array[x] ]
				features.append(temp_array)

			#make numpy arrays
			features = np.asarray(features)
			#print classID, features 

			
			learner = milk.defaultclassifier(mode='really-slow')
			model = learner.train(features, classID)
			pickle.dump( model, open( "coinvision_ai_model.mdl", "wb" ) )
			

		#except:
			print("could not retrain.. bad file")
			
			from sklearn import svm
			model = svm.SVC(gamma=0.001, C=100.)
			model.fit(features, classID)
			pickle.dump( model, open( "coinvision_ai_model_svc.mdl", "wb" ) )
			
			from sklearn.neighbors import KNeighborsClassifier
			neigh = KNeighborsClassifier(n_neighbors=3)
			neigh.fit(features, classID)
			pickle.dump( neigh, open( "coinvision_ai_model_knn.mdl", "wb" ) )
			
			from sklearn.svm import LinearSVC
			clf = LinearSVC()
			clf = clf.fit(features, classID)
			pickle.dump( clf, open( "coinvision_ai_model_lr.mdl", "wb" ) )
			
			from sklearn.linear_model import LogisticRegression
			clf2 = LogisticRegression().fit(features, classID)
			pickle.dump( clf2 , open( "coinvision_ai_model_clf2.mdl", "wb" ) )
		
			return