Example #1
    def makeMatchSet(self, state_phenotype, exploreIter, elcs):
        state = state_phenotype.attributeList
        phenotype = state_phenotype.phenotype
        doCovering = True
        setNumerositySum = 0

        elcs.timer.startTimeMatching()
        #Matching
        for i in range(self.popSet.size):
            cl = self.popSet[i]
            if cl.match(state, elcs):
                self.matchSet = np.append(self.matchSet, i)
                setNumerositySum += cl.numerosity

                #Covering Check
                if elcs.env.formatData.discretePhenotype:
                    if cl.phenotype == phenotype:
                        doCovering = False
                else:
                    if float(cl.phenotype[0]) <= float(phenotype) <= float(
                            cl.phenotype[1]):
                        doCovering = False

        elcs.timer.stopTimeMatching()
        #Covering
        while doCovering:
            print("Covering")
            newCl = Classifier(elcs, setNumerositySum + 1, exploreIter, state,
                               phenotype)
            self.addClassifierToPopulation(elcs, newCl, True)
            self.matchSet = np.append(self.matchSet, self.popSet.size - 1)
            doCovering = False
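The covering decision above can be isolated into a small standalone predicate. The sketch below is illustrative only (the names are hypothetical, not the eLCS API): covering is needed only when no matching classifier already advocates the current phenotype, by exact match for discrete phenotypes or interval containment for continuous ones.

def needs_covering(matching_phenotypes, phenotype, discrete):
    # Covering is skipped as soon as one matching classifier covers the phenotype.
    for p in matching_phenotypes:
        if discrete:
            if p == phenotype:
                return False
        elif float(p[0]) <= float(phenotype) <= float(p[1]):
            return False
    return True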
Example #2
def show(request, filter_id=0):
    """based on the current user's filer, rank the items in source, and show out.
		current one only show the default classifier(last one). Could be extended 
		to show different classifier.
	"""
    errors = []
    c = {}
    c['username'] = request.user.username
    c['filter_id'] = filter_id
    if filter_id == 0:
        user = auth.get_user(request)
        if user.profile.default_filter is None:
            errors.append('please train a filter first!')
            return render_to_response('show.html', {
                'username': request.user.username,
                'errors': errors
            })
        else:
            classifier = Classifier(user=auth.get_user(request),
                                    id=user.profile.default_filter.id)
            classifier.load()
            items = Item.objects.all()
            p_label, p_acc, p_val = classifier.predict(items)
            tempList = map(lambda x, y: [x, y], items, p_val)
            tempList = sorted(tempList, key=itemgetter(1), reverse=True)
            c['items'] = map(lambda x: x[0], tempList[:20])

    return render_to_response('show.html', c)
Example #3
    def build(self):
        self.bins_amount = int(self.discretization_textbox.get())
        print("number of bins: " + str(self.bins_amount))
        print("starting building model")
        #check bins value
        if (self.validBinValue(self.bins_amount)):
            #get Structure.text data
            self.structure = Structure.Structure(self.structure_path)
            self.structure.prepere_structure()

            self.train = Train.Train(self.train_path,
                                     self.structure.get_structure(),
                                     self.bins_amount)

            if (self.train.check_bin_max() <= self.bins_amount):
                tkMessageBox.showinfo("Alert",
                                      "Invalid discretization bins value")
                return

            self.train.clean_train()

            self.classifier = Classifier.Classifier(self.train, self.structure,
                                                    self.bins_amount,
                                                    self.folderPath)
            self.classifier.build_model()

            tkMessageBox.showinfo(
                "Naive Bayes Classifier",
                "Building classifier using train-set is done!")
Example #4
def saveClf():
    clf = Classifier()
    mass = 125
    clf.loadData("heavyTrainSet_DS_mass{}.npy".format(mass))

    # Extracting features.
    nComps = 50
    print "Extracting features from training data.."
    startExtractTime = time.time()
    percentVarCovered = clf.extractFeatsPCA(nComps)
    print "Original Image Size:", clf.imSet[0].shape
    print "Number of selected principal components:", nComps
    print "Percentage of variance covered:", percentVarCovered
    endExtractTime = time.time()
    extractTime = endExtractTime - startExtractTime
    print "Training data feature extraction time:", extractTime, "sec"
    print

    # Obtain classifier model and print the classification results on
    # training data.
    clf.model.fit(clf.featSet, clf.labelSet)
    predicts = clf.model.predict(clf.featSet)
    print "Classification results on training data (mass = {}):".format(mass)
    getScores(predicts, clf.labelSet, ["Double Chirp", "Not Double Chirp"])

    # Save model
    joblib.dump(clf, "svm_mass{}.joblib".format(mass))
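The saved model can be restored later with joblib; Example #26 below does exactly this. A minimal sketch, assuming the Classifier class is importable when unpickling and that predictions go through clf.model as in Example #26:

import joblib  # older scikit-learn versions expose this as sklearn.externals.joblib

clf = joblib.load("svm_mass125.joblib")
predicts = clf.model.predict(clf.featSet)  # featSet was pickled along with the model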
Example #5
def calc(datasetIndex, multiplierInt):
    csv = pd.DataFrame(columns=['dataset', 'bins', 'f1', 'zero-one'])
    exp = ((multiplierInt + 1) / 2)
    bins = math.ceil(2**exp)
    results = []
    for k in range(trials):
        dp = DataProcessor.DataProcessor(bin_count=bins)
        binnedDataset = dp.StartProcess(datasets[datasetIndex])
        N, Q, F, testData = train(binnedDataset)

        model = Classifier.Classifier(N, Q, F)
        classifiedData = model.classify(testData)

        stats = Results.Results()
        zeroOne = stats.ZeroOneLoss(classifiedData)
        macroF1Average = stats.statsSummary(classifiedData)
        datapoint = {
            'dataset': dataset_names[datasetIndex],
            'bins': bins,
            'f1': macroF1Average,
            'zero-one': zeroOne / 100
        }
        print(datapoint)
        csv = csv.append(datapoint, ignore_index=True)
        # trial = {"zeroOne": zeroOne, "F1": macroF1Average}
        # results.append(trial)
        # print(trial)
    data.append(csv)
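For reference, the bin schedule computed above grows roughly as sqrt(2) per step of multiplierInt. A quick check of the formula, assuming Python 3 true division:

import math
for m in range(1, 6):
    print(m, math.ceil(2 ** ((m + 1) / 2)))  # -> 2, 3, 4, 6, 8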
Example #6
def build():
    if not entry1.get() == "":
        checkBinNum()
        #get structure file
        pathToStructure = entry1.get() + "\\Structure.txt"
        pathToStructure = pathToStructure.replace('/', '\\')
        try:
            structure = pd.read_csv(pathToStructure, index_col=False, sep='\t')
        except:
            popErrorMessage("Error- Empty Files!")
            return

        # df_structure = pd.DataFrame(structure)
        # print df_structure
        #get train set
        pathToTrainSet = entry1.get() + "\\train.csv"
        pathToTrainSet = pathToTrainSet.replace('/', '\\')
        try:
            _trainSet = pd.read_csv(pathToTrainSet)
        except:
            popErrorMessage("Error- Empty Files!")
            return

        df_trainSet = pd.DataFrame(_trainSet)
        classifier = Classifier(structure, entry2)
        updateTrainSet, numericFeaturesArr = classifier.cleanData(
            pathToTrainSet, df_trainSet)
        globals()['trainSet'] = updateTrainSet
        #release Classify button
        classifyBut.config(state="normal")
        popErrorMessage("Building classifier using train-set is done!")
Example #7
 def __getClassifier(self):
     if self.__isClassifierExists():
         cls = self.__loadModel()
     else:
         cls = Classifier()
     cls.SetLogger(self)
     return cls
Example #8
def main():
    print('--- Loading data and reducing dimensionality ---')
    data = np.empty([110, 10000], np.float32)
    for idx in range(110):
        image = Image.open('Data/s' + str(idx + 1) + '.bmp')
        data[idx] = np.reshape(image, [10000])
    file = open('Data/labels.txt')
    label = np.array(file.readline().strip('\n').split(','), np.int32)
    '''
    Invoke the dimensionality-reduction algorithm
    '''
    data_reduced = mds_func(data)
    # data_reduced = isomap_func(data)
    # data_reduced = le_func(data)
    # data_reduced = lle_func(data)

    classifier = Classifier.Classifier(20)
    for repeat in range(500):
        for idx in range(110):
            if idx % 11 != 0:
                classifier.fit(data_reduced[idx], label[idx])
        sys.stdout.write('\rTraining, %.1f%% complete' % (repeat * 100 / 500))
    sys.stdout.write('\rTraining finished, starting testing\n')
    correct_times = 0
    for idx in range(10):
        val = classifier.classify(data_reduced[idx * 11])
        print('Prediction %2d: predicted %d, actual %d' % (idx + 1, val, label[idx * 11]))
        if val == label[idx * 11]:
            correct_times += 1
    print('Testing finished, accuracy: %.2f%%' % (correct_times * 100 / 10))
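The loops above implement a fixed hold-out split: every 11th image (idx % 11 == 0) is reserved for testing, leaving 100 training and 10 test samples out of 110. The same split written out explicitly, purely for clarity:

test_idx = [i for i in range(110) if i % 11 == 0]    # 10 held-out samples
train_idx = [i for i in range(110) if i % 11 != 0]   # 100 training samples
assert len(test_idx) == 10 and len(train_idx) == 100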
Example #9
 def __init__(self):
     super(VehicleDetector, self).__init__()
     # Sliding windows
     self.yStart = 400
     self.yStop = 650
     self.x_overlap = 0.65
     self.y_overlap = 0.75
     # Filter
     self.filterThreshold = 2
     self.filter = F.Filter(self.filterThreshold)
     # Print summary to check correct parameters
     self.Summary()
     # Sub-components
     self.renderer = R.Renderer()
     self.database = D.Database()
     cars, notcars = self.database.GetListOfImages()
     self.classifier = C.Classifier(cars,
                                    notcars,
                                    loadFromFile=True,
                                    database=self.database)
     # Output video parameters
     self.outputToImages = 0
     self.outputVideoName = self.database.GetOutputVideoPath()
     # Train classifier ?
     self.trainClassifier = 1
     # TODO: implement the loading
     # Bounding boxes
     self.bboxes = self.LoadSlidingWindows()
Example #10
 def buildClicked(self):
     attrs = Data.getAttributesDictionary(self.path + "\\Structure.txt")
     trainData = pandas.DataFrame.from_csv(self.path + "\\train.csv", index_col=None)
     processedData = Data(trainData=trainData, attributes=attrs, numOfBins=self.numOfBins)
     self.classifier = Classifier(data=processedData)
     self.classifyButton['state'] = 'normal'
     tkMessageBox.showinfo("Naive Bayes Classifier", "Building classifier using train-set is done!")
Example #11
 def __init__(self):
     super(Test, self).__init__()
     self.database = D.Database()
     cars, notcars = self.database.GetListOfImages()
     self.classifier = C.Classifier(cars, notcars, loadFromFile=True, database=self.database)
     self.renderer = R.Renderer()
     self.vehicleDetector = V.VehicleDetector()
Example #12
def predict(restID, userID):
    classifier = Classifier.Classifier()

    response = app.response_class(
        response=classifier.run(userID, restID),
        status=200,
        mimetype='application/json'
    )

    return response
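A hedged sketch of how this view might be wired up; the route pattern, app setup, and module layout are assumptions for illustration, not taken from the original source:

from flask import Flask
import Classifier

app = Flask(__name__)

@app.route('/predict/<restID>/<userID>')
def predict_route(restID, userID):
    return predict(restID, userID)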
Example #13
 def train(self):
     global Models
     name = self.eval_data.name
     if name in Models:
         self.trainer = Models[name]
     else:
         Models[name] = self.trainer
         self.trainer.train(self.train_data, persist=False)
         self.trainer.train_gist(self.train_data, persist=False)
     self.classifier = Classifier(self.trainer)
     return self
Example #14
def Main():
    dataReader = DataReader()
    allUserData = dataReader.loadData(
        "DSL-StrongPasswordData")  #loads all users data

    classifier = Classifier()
    scalar = 1.0
    scalarCap = 1.6

    dimDeviation = 1
    dimCap = 21

    while (dimDeviation < dimCap):
        print "testing dims: " + str(dimDeviation)
        correct_person_accuracy = []
        wrong_person_accuracy = []
        for k in range(0, 50):
            owner_index = k  # index for the user that is to be tested
            first_time = True  # temp variable for checking if first time creating test_data_wrong
            #print "testing for person "+str(k)+" created!"
            for i in range(0, 50):
                userDataRaw = allUserData[i]  #data from 1 user
                userData = dataReader.formatData(
                    userDataRaw
                )  #formats data (strips user and session ids etc), returns Matrix.
                if i == owner_index:
                    np.random.shuffle(
                        userData
                    )  # Shuffle to get data from different sessions
                    person1 = DataCluster(
                        userData[0:300],
                        scalar)  # creates the person to be tested
                    test_data_right = userData[300:]
                    # print test_data_right
                else:
                    if first_time:
                        test_data_wrong = userData
                        first_time = False
                    else:
                        test_data_wrong = np.concatenate(
                            (test_data_wrong, userData), axis=0)
            correct_person_accuracy.append(
                classifier.compare_all(person1, test_data_right, True,
                                       dimDeviation))
            wrong_person_accuracy.append(
                classifier.compare_all(person1, test_data_wrong, False,
                                       dimDeviation))

        print "False recognition rate: " + str(
            1 - np.mean(correct_person_accuracy))
        print "False acceptance rate: " + str(1 -
                                              np.mean(wrong_person_accuracy))
        #       scalar += 0.1
        dimDeviation += 1
Example #15
def testPCAFit():
    dat = np.load("heavyTrainSet_noDS.npy")
    # dat = dat[: 600]

    # clf = Classifier(svm.SVR(kernel="linear", gamma="auto"))
    clf = Classifier(svm.SVC(kernel="linear", gamma="auto", probability=True))

    # Loading data
    print "Loading training data.."
    clf.imSet, clf.labelSet = DataFactory.getTrainableArrays(dat)

    # Extracting features
    ncomps = 30
    print "Extracting features from training data.."
    startExtractTime = time.time()
    percentVarCovered = clf.extractFeatsPCA(ncomps)
    endExtractTime = time.time()
    extractTime = endExtractTime - startExtractTime
    print "Original Image Size:", clf.imSet[0].shape
    print "Number of selected principal components:", ncomps
    print "Percentage of variance covered:", percentVarCovered
    print "Training data feature extraction time:", extractTime, "sec"
    print

    numIns = len(clf.featSet)
    shuffIndices = range(numIns)
    # np.random.shuffle(shuffIndices)
    shuffFeats = clf.featSet[shuffIndices]
    shuffLabels = clf.labelSet[shuffIndices]
    confMat = np.array([[0, 0], [0, 0]])

    print "Start training.."
    clf.model.fit(shuffFeats, shuffLabels)
    print "Start predicting.."
    probs = clf.model.predict_proba(shuffFeats)
    assert probs.shape == (numIns, 2)

    for i, prob in enumerate(probs):
        if dat[shuffIndices[i]].hasDoubleChirp:
            if prob[0] > 0.5:
                confMat[0, 0] += 1
            else:
                confMat[0, 1] += 1
        else:
            if prob[0] <= 0.5:
                confMat[1, 1] += 1
            else:
                confMat[1, 0] += 1

    print "Training accuracy:", 1.0 * (confMat[0, 0] + confMat[1, 1]) / numIns
    print "Total number of fails:", confMat[0, 1] + confMat[1, 0]
    print "Confusion Matrix"
    printConfMat(confMat, ["DoubleChirp", "NotDoubleChirp"])
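Beyond accuracy, the same confusion matrix (rows = actual, columns = predicted) yields precision and recall directly. A small helper using the standard definitions, not part of the original script:

def precision_recall(confMat):
    # Row 0 = actual positive ("DoubleChirp"), column 0 = predicted positive.
    tp, fn = confMat[0, 0], confMat[0, 1]
    fp = confMat[1, 0]
    precision = tp / float(tp + fp) if (tp + fp) else 0.0
    recall = tp / float(tp + fn) if (tp + fn) else 0.0
    return precision, recall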
Example #16
def train(parsed_training_data):
    first_class = parsed_training_data[1][1]
    second_class = ""

    # build structure of details with feature names and build list of classifier features
    classifier_features = []
    classifier_details = []
    for feature in parsed_training_data[0][2:]:
        detail_list = [feature, 0, 0]
        classifier_details.append(detail_list)
        classifier_features.append(feature)

    # capture the two class names and count their totals
    class_counts = {}
    for row in parsed_training_data[1:]:

        # debug counter
        debug.run_counter("nb.train", 100)

        # count the occurrences of each class and add to the class counts dictionary
        class_name = row[1]
        if class_name in class_counts:
            class_counts[class_name] += 1
        else:
            second_class = class_name
            class_counts[class_name] = 1

        # increment the respective per-class feature count in the detail list
        for i in range(0, len(classifier_details)):
            if row[i + 2]:
                if class_name == first_class:
                    classifier_details[i][1] += 1
                else:
                    classifier_details[i][2] += 1

    # calculate the conditional probability of each feature given its class
    for detail_tuple in classifier_details:
        detail_tuple[1] = detail_tuple[1] / class_counts[first_class]
        detail_tuple[2] = detail_tuple[2] / class_counts[second_class]

    # create new instance of Classifier and populate with classifier details
    class_names_counts = ((first_class, class_counts[first_class]),
                          (second_class, class_counts[second_class]))
    classifier = Classifier.Classifier("nb", class_names_counts,
                                       classifier_features)
    for detail in classifier_details:
        classifier.add_classifier_detail(detail)

    # reset the debug counter
    debug.reset_counter("nb.train")

    return classifier
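At prediction time, a naive Bayes classifier built from these details would score each class as the class prior times the product of the per-feature conditionals. A minimal, self-contained sketch of that scoring rule (illustrative only; the real Classifier prediction API is not shown in this example):

def naive_bayes_score(feature_flags, class_count, total_count, feature_probs):
    # P(class) * product over features of P(feature | class)
    score = float(class_count) / total_count
    for present, p in zip(feature_flags, feature_probs):
        score *= p if present else (1.0 - p)
    return score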
Example #17
    def build(self):
        try:
            data_structure = {}
            structure_file = open(self.folder_path.get() + "/Structure.txt",
                                  "r")
            structure_content = structure_file.readlines()
            structure_file.close()

            # get the structure of the model from the structure file
            for line in structure_content:
                initial_split = line.split(" ")

                classifiers = initial_split[2]

                if len(initial_split) > 3:
                    i = 3
                    while i < len(initial_split):
                        classifiers = classifiers + " " + initial_split[i]
                        i = i + 1

                if '{' in classifiers:
                    classifiers = classifiers.replace("\n", "")
                    classifiers = classifiers.replace("{", "")
                    classifiers = classifiers.replace("}", "")
                    classifiers = classifiers.split(",")
                else:
                    classifiers = ['NUMERIC']

                # add a new entry to the structure
                data_structure[line.split(" ")[1]] = {
                    'attributes': classifiers
                }

            # get the raw data from the train file
            train_data = pd.read_csv(
                filepath_or_buffer=self.folder_path.get() + "/train.csv")

            # create the classifier from the training file
            self.classifier = Classifier.Classifier(self.folder_path.get(),
                                                    data_structure,
                                                    int(self.bins.get()))
            self.classifier.build_model(train_data)

            # enable the "Classify" button to be pressed
            self.classify_button.config(state='normal')

            messagebox.showinfo(
                "Information", "Building classifier using train-set is done!")
        except IOError:
            messagebox.showerror("Error",
                                 "There was a problem reading the files!")
Example #18
def get_label(shop_id):
  shops = get_labeled_shops()
  oclf = Classifier.Classifier(shops)
  shop = Shop(shop_id)
  label = oclf.predict(shop.vector)
  try:
    if shop.budget_from_user_night_max < 2000:
      label[0] = 3.0
    elif shop.budget_from_user_night_max < 5000:
      label[1] = 3.0
    else:
      label[2] = 3.0
  except:
    label[1] = 2.0
  return label
Example #19
 def build(self):
     try:
         self.train = pd.read_csv(self.entryPath.get() + "/train.csv")
         if self.validate(self.entryDiscBins.get()):
             # load train file, test file and structure file
             if (os.path.getsize(self.entryPath.get() + "/Structure.txt") == 0):
                 raise Exception("The structure file is empty")
             self.structureFile = open(self.entryPath.get() + "/Structure.txt")
             self.fileHandler = FilesHandler()
             self.structureDic = self.fileHandler.createStstructureDic(self.structureFile)
             self.dataCleaner = DataCleaner(self.structureDic, self.numOfBins)
             self.toLowerCase("train")
             self.train = self.dataCleaner.trainCleaning(self.train)
             self.classifier = Classifier(self.train, self.entryPath.get(), self.structureDic, self.numOfBins)
             self.wasBuilt = True
             tkMessageBox.showinfo("Build Message", "Building classifier using train-set is done!")
     except Exception as e:
         # surface any build/IO error to the user instead of crashing
         tkMessageBox.showerror("Build Message", str(e))
Example #20
def classify():
    global data
    classifier = Classifier(test=data[3],
                            structure=data[0],
                            train=data[1],
                            meta_data=data[2],
                            bins=bins_num)
    output = open(filename + "/output.txt", "a")
    i = 1
    for classification in classifier.classify():
        output.write(str(i) + " " + str(classification) + "\n")
        i += 1
    output.close()
    messagebox.showinfo("Naive Bayes Classifier", "Classification is done!")
    root.destroy()
    sys.exit(0)
Example #21
def main(argv=None):
    tf.reset_default_graph()
#.........................Placeholders for input image and labels...........................................................................................
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image") #Input image batch first dimension image number second dimension width third dimension height 4 dimension RGB
    GTLabel = tf.placeholder(tf.int32, shape=[None, 3], name="GTLabel")#Ground truth labels for training
  #.........................Build FCN Net...............................................................................................
    Net =  BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path) #Create class for the network
    feature = Net.build(image)  # Create the net and load initial weights
#......................................Get loss functions for neural net work  one loss function for each set of label....................................................................................................
    res = tf.placeholder(tf.float32, shape=[None, 3, 4, 512], name="input_image")
    c = C.Classifier(res)
    Loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=GTLabel,logits=c.classify(),name="Loss"))  # Define loss function for training

   #....................................Create solver for the net............................................................................................
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(Loss)
#----------------------------------------Create reader for data set--------------------------------------------------------------------------------------------------------------
    TrainReader = Data_Reader.Data_Reader(Train_Image_Dir) #Reader for training data
    sess = tf.Session() #Start Tensorflow session
# -------------load trained model if exist-----------------------------------------------------------------
    print("Setting up Saver...")
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer()) #Initialize variables
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path: # if train model exist restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
#--------------------------- Create files for saving loss----------------------------------------------------------------------------------------------------------

    f = open(TrainLossTxtFile, "w")
    f.write("Iteration\tloss\t Learning Rate="+str(learning_rate))
    f.close()
#..............Start Training loop: Main Training....................................................................
    for itr in range(MAX_ITERATION):
        print "itr:", itr
        Images, GTLabels = TrainReader.getBatch()  # Load augmented images and ground-truth labels for training
        feed_dict = {image:Images}
        output = sess.run(feature, feed_dict=feed_dict)
        feed_dict = {res:output,GTLabel:GTLabels}
        _, loss = sess.run([optimizer,Loss], feed_dict=feed_dict) # Train one cycle
        print "loss is,", loss
# --------------Save trained model------------------------------------------------------------------------------------------------------------------------------------------
        if itr % 230 == 0 and itr>0:
            print("Saving Model to file in "+logs_dir)
            saver.save(sess, logs_dir + "model.ckpt", itr) #Save model

#......................Write and display train loss..........................................................................
Example #22
    def train(self, X, scalar):
        # standard deviation (could come in handy)
        self.accepted_deviation = np.std(X, axis=0) * scalar
        #print self.accepted_deviation
        self.min = np.min(X)
        self.max = np.max(X)
        self.average = (self.max - self.min) / 2
        # mean value of every dimension
        self.mean = np.mean(X, axis=0)
        #         print("std: " + str(self.accepted_deviation))
        #         print("min: " + str(self.min))
        #         print("max: " + str(self.max))
        #         print("(max - min)/2: " + str(self.average))
        # print("mean: " + str(self.mean))

        classifier = Classifier()
        accuracy = 0
        accepted_accuracy = 0.8
        learning_rate = 1.05
Example #23
 def __init__(self):
     self.isRunning = True
     self.frameNumber = 0
     self.linearSvc = Classifier.Classifier()
     self.linearSvc.trainEyeblink8()
     self.blinks = 0
     self.frames = []
     self.Xs = []
     self.Ys = []
     self.OutcomeValues = [0 for i in range(13)]
     self.interBlinkTimes = []
     self.blinkFramesGap = 0
     self.previousBlinkTimeStamp = 0
     self.nextBlinkTimeStamp = 0
     self.blinksPerMinute = 0
     self.interBlinksTime = 0
     self.avgInterBlinksTime = 0
     self.startTime = 0
     self.endTime = 0
     self.std = 0
Example #24
def Classify():
    # get structure file
    pathToStructure = entry1.get() + "\\Structure.txt"
    pathToStructure = pathToStructure.replace('/', '\\')
    try:
        structure = pd.read_csv(pathToStructure, index_col=False, sep='\t')
    except:
        popErrorMessage("Error- Empty File!")
        return
    #create classifier & classify Test set
    classifier = Classifier(structure, entry2)
    classifier.classify(entry1.get(), trainSet)

    end = Tk()
    end.title("Naive Bayes Classifier")
    end.minsize(400, 300)
    infoLabel = Label(end, text="Classified Successfully!")
    infoLabel.pack()
    okBut = Button(end, text="OK", command=exit)
    okBut.pack()
    end.mainloop()
Example #25
 def _train_data(self, iden, res=None, res_info=None):
     if res is None:
         import cPickle as pickle
         strs = 'classifier/train_res_' + iden + '.txt'
         f = open(strs)
         strs = 'classifier/train_info_' + iden + '.txt'
         f2 = open(strs)
         res = pickle.load(f)
         f.close()
         res_info = pickle.load(f2)
         f2.close()
     else:
         (res, res_info) = self._fanhua_extract(iden)
     self.c = Classifier.Classifier(test=False,
                                    type='GradientBoostingClassifier',
                                    vec='featurehash',
                                    genre='n_dict',
                                    identify=iden)
     #self.c = Classifier.Classifier(test=False,type='AdaBoostClassifier',vec='featurehash',genre='n_dict',identify=iden)
     #self.c = Classifier.Classifier(test=False,type='gaussiannb',vec='union',genre='n_dict',identify=iden)
     #self.c = Classifier.Classifier(test=False,vec='dictvec',genre='n_dict',identify=iden)
     #self.c = Classifier.Classifier(type='svc',test=False,vec='featurehash',genre='n_dict',identify=iden)
     self.c.test_train_indri(res, res_info)
Example #26
def testClf():
    clf = joblib.load("svm_mass125.joblib")
    mock = Classifier()
    mass = 200
    mock.loadData("heavyTrainSet_DS_mass{}.npy".format(mass))

    # Extracting features.
    nComps = 50
    print "Extracting features from training data.."
    startExtractTime = time.time()
    percentVarCovered = mock.extractFeatsPCA(nComps)
    print "Original Image Size:", mock.imSet[0].shape
    print "Number of selected principal components:", nComps
    print "Percentage of variance covered:", percentVarCovered
    endExtractTime = time.time()
    extractTime = endExtractTime - startExtractTime
    print "Training data feature extraction time:", extractTime, "sec"
    print

    # Get classification scores.
    predicts = clf.model.predict(mock.featSet)
    print "Classification results with mass {}:".format(mass)
    getScores(predicts, mock.labelSet, ["Double Chirp", "Not Double Chirp"])
Example #27
def predict(imagebatch):
    tf.reset_default_graph()
    logs_dir = "/Users/anekisei/Documents/Spine_project_horizontal/classifier/logs/"  # "path to logs directory where trained model and information will be stored"
    Image_Dir = "/Users/anekisei/Documents/Spine_project_vertical/test_images/"  # Test image folder
    model_path = "/Users/anekisei/Documents/Spine_project_vertical/FCN_segment/Model_Zoo/vgg16.npy"  # "Path to pretrained vgg16 model for encoder"
    image = tf.placeholder(
        tf.float32, shape=[None, None, None, 3], name="input_image"
    )  # Input image batch first dimension image number second dimension width third dimension height 4 dimension RGB

    # -------------------------Build Net----------------------------------------------------------------------------------------------
    Net = BuildNetVgg16.BUILD_NET_VGG16(
        vgg16_npy_path=model_path)  # Create class instance for the net
    feature = Net.build(image)
    res = tf.placeholder(tf.float32,
                         shape=[None, 3, 4, 512],
                         name="input_image")
    c = C.Classifier(res)
    logits = c.classify()
    sess = tf.Session()  #Start Tensorflow session
    sess.run(tf.global_variables_initializer())
    #print("Setting up Saver...")
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # if a trained model exists, restore it
        print "Restore model from:", ckpt.model_checkpoint_path
        saver.restore(sess, ckpt.model_checkpoint_path)
        #print("Model restored...")
    else:
        print("ERROR NO TRAINED MODEL IN: " + ckpt.model_checkpoint_path +
              " See Train.py for creating train network ")
        sys.exit()
    feed_dict = {image: imagebatch}
    output = sess.run(feature, feed_dict=feed_dict)
    feed_dict = {res: output}
    logits = sess.run(logits, feed_dict=feed_dict)  # Run inference on the batch
    predicts = np.argmax(logits, axis=1)
    return predicts
Example #28
def Kernel(width, height, patterns):
    system = System()
    system.fun('~', 'call', CALL)
    system.fun('>', 'more', MT)
    system.fun('<', 'less', LT)
    system.fun('+', 'plus', ADD)
    system.fun(':', '.set', SET)
    system.fun('?', '.if', None)
    system.fun('(', '_open', None)
    system.fun(')', '_close', None)
    system.var('w', width)
    system.var('h', height)
    system.var('x', 0)
    system.var('y', 0)
    system.var('tx', 0)
    system.var('ty', 0)
    system.var('dx', 0)
    system.var('dy', 0)
    system.var('def', 0)
    system.var('inc', 1)
    system.var('dec', -1)
    system.var('input', None)
    system.var('output', None)
    system.var('classifier', Classifier(patterns))
    # system.var('up', (0, 1))
    # system.var('down', (0, -1))
    # system.var('left', (-1, 0))
    # system.var('right', (1, 0))

    system.inputs = ['tx', 'ty', 'input']
    system.script = [
        '((x > tx) ? (dx : dec))', '((x < tx) ? (dx : inc))',
        '((y > ty) ? (dy : dec))', '((y < ty) ? (dy : inc))', '(x : (x + dx))',
        '(y : (y + dy))', '(dx : def)', '(dy : def)',
        '(output : (classifier ~ input))'
    ]
    return system
Example #29
 def __init__(self, test=False):
     #self.t = TextSim()
     self.rels = {}
     self.rels["erzi"] = [u"儿子"]
     self.rels["nver"] = [u"女儿"]
     self.rels["nanyou"] = [u"男友", u"男朋友"]
     self.rels["nvyou"] = [u"女友", u"女朋友"]
     self.rels["muqin"] = [u"妈妈", u"母亲"]
     self.rels["fuqin"] = [u"爸爸", u"父亲"]
     self.rels["qizi"] = [u"妻子", u"老婆", u"夫人", u"夫妇"]
     self.rels["zhangfu"] = [u"丈夫", u"老公", u"夫妇"]
     #TODO: when adding a new relation, add its trigger words here, e.g. self.rels["xinguanxi"]=[u"xinguanxi"]
     self.q = re.compile(r'\\')
     self.p = re.compile('<[^>]+>')
     self.b = re.compile(
         '(http|ftp|https)?(:\/\/)?([\w\-_]+\.)+([\w\-:_]+/)([\w\-\.,@^=%&:/~\+#]+)?'
     )
     self.test = test
     if test is True:
         self.c = Classifier.Classifier(type='GradientBoostingClassifier',
                                        vec='featurehash',
                                        genre='n_dict',
                                        identify='muqin')
     pass
Example #30
import tensorflow as tf
import keras
from tensorflow.python.keras.preprocessing import image
from keras.applications.inception_v3 import *

from Classifier import *
from FeatureExtract import *

import cv2
import pickle

DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'

mm = FeatureExtractor()
model = mm.CompleteModel
gardien = Classifier()

#vidcap = cv2.VideoCapture('bigbunny.mp4')
success = 1
while success:
    #success,image = vidcap.read()
    image = mm.LoadImage("image.jpg")
    #ff = mm.GetFeatureMap(inn)
    inn = mm.ProcessImage(image)

    print(inn, inn.shape)
    ff = mm.GetFeatureMap(inn)
    print(ff, ff.shape)
    success = False
vv = mm.LoadProcessedVideo('bigbunny.mp4')