Example No. 1
    def test08(self):
        self.kb = KnowledgeBase()
        file = 'dataFiles/diy_kb.txt'
        data = tokenize_file(file)
        classify(self.kb, data)

        print("No data found because of test08, (txt files empty)")
        # test/print inference engine nodes here
        """
        for f in self.kb.facts:
            print(f)
        for r in self.kb.rules:
            print(r)
        """

        self.sb = SuggestionBase(self.kb)
        file = "dataFiles/diy_suggestions.txt"
        #ensure that the txt file has each subgoal on a separate line
        data = tokenize_suggestion_file(file)
        classify_suggestions(self.sb, data)

        # ask question here
        q = Question('question1', 'Insert question here',
                     "Insert question (text version) here?")
        ans = self.sb.evaluate(q)
Example No. 2
    def test05(self):
        self.kb = KnowledgeBase()
        file = 'dataFiles/inferences.txt'
        data = tokenize_file(file)
        classify(self.kb, data)

        expected1 = Fact(['isa', 'luke', 'mortal'])
        expected2 = Fact(['happy', 'luke'])
        expected3 = Fact(['perfect', 'luke'])

        check = False
        if expected1 in self.kb.facts and expected2 in self.kb.facts and expected3 in self.kb.facts:
            check = True
        self.assertTrue(check)

        check = False
        expected1 = Fact(['above', 'block1', 'block3'])
        expected2 = Rule([['above', 'block2', '?z']],
                         ['above', 'block1', '?z'])
        expected3 = Rule([['above', 'block3', '?z']],
                         ['above', 'block2', '?z'])
        if expected1 in self.kb.facts and expected2 in self.kb.rules and expected3 in self.kb.rules:
            check = True
        self.assertTrue(check)
        """ Add additional facts and rules to knowledgebase"""
        check = False
        self.kb.add(Fact(['isa', 'dave', 'human']))
        expected1 = Fact(['isa', 'dave', 'mortal'])
        expected2 = Fact(['happy', 'dave'])
        expected3 = Fact(['perfect', 'dave'])
        #print(expected1)
        if expected1 in self.kb.facts and expected2 in self.kb.facts and expected3 in self.kb.facts:
            check = True
        self.assertTrue(check)
Example No. 3
    def evaluate(self, test_data, log=True):
        # Count the test cases whose network output classifies to the expected label.
        correct = 0
        for test_case in test_data:
            if util.classify(self.run(test_case[0])) == util.classify(
                    test_case[1]):
                correct += 1
        error = 1 - (correct / len(test_data))

        if log:
            print('Network has an error of {:.2%}'.format(error))
Example No. 4
    def test02(self):
        self.kb = KnowledgeBase()
        file = "dataFiles/initial_test.txt"
        data = tokenize_file(file)
        classify(self.kb, data)
        self.assertTrue(isinstance(self.kb.facts[0], Fact))
        self.assertTrue(isinstance(self.kb.facts[1], Fact))
        self.assertTrue(isinstance(self.kb.facts[2], Fact))
        self.assertTrue(isinstance(self.kb.rules[0], Rule))
        self.assertTrue(isinstance(self.kb.rules[1], Rule))

        self.assertEqual(self.kb.facts[0].predicate, "isa")
        self.assertEqual(self.kb.facts[0].terms, ["luke", "human"])

        self.assertEqual(self.kb.rules[0].predicate[0], "on")
        self.assertEqual(self.kb.rules[0].vars[0], ["?x", "table"])
        self.assertEqual(self.kb.rules[0].predicate[1], "on")
        self.assertEqual(self.kb.rules[0].vars[1], ["?y", "?x"])
        self.assertEqual(self.kb.rules[0].asserted,
                         ["3-Tower", "?x", "?y", "?z"])
Example No. 5
    def test07(self):
        self.kb = KnowledgeBase()
        file = 'dataFiles/data_for_suggestions.txt'
        data = tokenize_file(file)
        classify(self.kb, data)

        self.sb = SuggestionBase(self.kb)
        file = "dataFiles/suggestions.txt"
        #ensure that the txt file has each subgoal on a separate line
        data = tokenize_suggestion_file(file)
        classify_suggestions(self.sb, data)

        q = Question(
            'question2', 'DurationFeedHumans moon ?time',
            "How long can the moon feed humans if it was made of green cheese?"
        )
        ans = self.sb.evaluate(q)
        ans = float(ans)
        withinRange = False
        if ans > 1e12 and ans < 1e13:
            withinRange = True
        self.assertTrue(withinRange)
Example No. 6
    def test03(self):
        self.kb = KnowledgeBase()
        file = 'dataFiles/data.txt'
        data = tokenize_file(file)

        classify(self.kb, data)

        b1 = self.kb.ask("(ON ?X TABLE)")
        b2 = self.kb.ask(
            "(populationOfRegion ?Y ((UnitOfCountFn HomoSapiens) 6560608))")
        b3 = self.kb.ask(
            "(relationAllInstance outerRadius AutomobileTire (?Z 0.5 0.9))")

        self.assertEqual(b1[0].vars[0], "?X")
        self.assertEqual(b1[0].constants[0], "D")

        self.assertEqual(b2[0].vars[0], "?Y")
        self.assertEqual(b2[0].constants[0], "Honduras")

        self.assertEqual(b3[0].vars[0], "?Z")
        self.assertEqual(b3[0].constants[0], "Meter")

        b4 = self.kb.ask("(ON ?first ?second)")

        self.assertEqual(b4[0].vars, ["?first", "?second"])
        self.assertEqual(b4[0].constants, ["D", "TABLE"])

        self.assertEqual(b4[1].vars, ["?first", "?second"])
        self.assertEqual(b4[1].constants, ["E", "D"])

        self.assertEqual(b4[2].vars, ["?first", "?second"])
        self.assertEqual(b4[2].constants, ["F", "E"])
        self.assertEqual(len(b4), 3)

        b5 = self.kb.ask("(this_does_not_exist ?x ?y)")
        self.assertFalse(b5)
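
# The queries above bind '?' variables against stored facts and return the
# bindings as parallel vars/constants lists. KnowledgeBase.ask itself is not
# shown here; a minimal, self-contained sketch of that matching step (an
# illustration, not the repo's implementation) could look like this:
def match(pattern, fact):
    """Return {var: constant} bindings if pattern matches fact, else None."""
    if len(pattern) != len(fact):
        return None
    bindings = {}
    for p, f in zip(pattern, fact):
        if p.startswith('?'):
            if bindings.setdefault(p, f) != f:  # a variable must bind consistently
                return None
        elif p != f:
            return None
    return bindings

facts = [['ON', 'D', 'TABLE'], ['ON', 'E', 'D'], ['ON', 'F', 'E']]
query = ['ON', '?first', '?second']
for b in filter(None, (match(query, f) for f in facts)):
    print(list(b.keys()), list(b.values()))
# ['?first', '?second'] ['D', 'TABLE'], then ['E', 'D'], then ['F', 'E']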
Example No. 7
def NN(dataSet, T, Z1=0):
    """ Implemention of NN classify

        Input:

            dataSet (list)  : Input dataset
            T (float)       : Distance threshold
            Z1 (int))   : Index of first clustering center
        Output:

    """

    zs, zsi = genClusters(dataSet, T, Z1)

    clusteri, clusterz = util.classify(dataSet, zs, zsi)
    return clusteri, clusterz
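
# NN above relies on genClusters and util.classify, which are not shown in this
# snippet. A self-contained sketch of the same idea -- create a new cluster
# center whenever a point lies farther than T from every existing center, then
# assign each point to its nearest center -- is given below (illustrative only,
# not the repo's implementation):
import math

def nn_threshold_cluster(points, T, z1=0):
    centers = [points[z1]]
    for p in points:
        if min(math.dist(p, c) for c in centers) > T:
            centers.append(p)
    labels = [min(range(len(centers)), key=lambda k: math.dist(p, centers[k]))
              for p in points]
    return labels, centers

labels, centers = nn_threshold_cluster([(0, 0), (0.5, 0), (5, 5), (5.2, 4.9)], T=2.0)
print(labels)   # [0, 0, 1, 1]
print(centers)  # [(0, 0), (5, 5)]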
Example No. 8
def validate(args):
    
##################
    norm_map = args.norm.partition('-')[0]
    if 'no' == args.norm.partition('-')[2]:
        m_uniform = '0'
    else:
        m_uniform = 'fair'

    if args.model=="default":
        if args.xy_size==0:
            args.model="checkpoint/{}/no_xy/netG_model_epoch_250.pth".format(args.dataset)
            loc="no_xy/"
        elif args.xy_size==2 and norm_map=='cycle':
            args.model="checkpoint/{}/rot_xy/netG_model_epoch_250.pth".format(args.dataset)
            loc = "rot_xy/"
        else:
            args.model="checkpoint/{}/netG_model_epoch_250.pth".format(args.dataset)            
            loc = "{}/".format(args.norm)
    elif args.model=="best":
        args.model="best_model/netG_model_epoch_200_3D+2D*5D.pth"
        loc = 'best'
    elif os.path.exists(args.model):
        pass  # keep args.model as provided
    else:
        args.model="checkpoint/{}/{}/netG_model_epoch_{}.pth".format(args.dataset, norm_map, args.it)
        if not os.path.exists(args.model):
            args.model="checkpoint/{}/pix2pix/netG_model_epoch_{}.pth".format(args.dataset, args.it)
        loc=args.norm
    img_size = (args.img_rows, args.img_cols)
    val_dir = "dataset/{}/val/{}val_image_{}/".format(args.dataset, loc, args.rotated)
    if args.xy_size>0 and norm_map=='':
        raise RuntimeError('Please choose norm parameter for coordconv e.g. linear, cycle, sigmoid')
##################
    print(args)
    np.save("dataset/{}/parameters_val_{}_{}_{}".format(args.dataset, args.xy_size, m_uniform, args.it), args)
    print('===> Loading datasets')
    root_path = "dataset/"
    evaluation_set = get_evaluation_set(root_path + args.dataset, mode = args.mode, xy=args.xy_size, img_size=(args.img_cols, args.img_rows), norm=args.norm, rotated=args.rotated, val=True)
    evaluation_data_loader = DataLoader(dataset=evaluation_set, num_workers=args.threads, batch_size=args.batch_size, shuffle=False)

#####################

    # Setup Model
    netG_model = torch.load(args.model)
    #netG_model.eval()
    if torch.cuda.is_available():
        netG_model = netG_model.cuda()
    #diagnose_network(netG_model)

####################
    gts, preds = [], []
    for i, (images, labels) in tqdm(enumerate(evaluation_data_loader)):
        if torch.cuda.is_available():
            # Variable/volatile is legacy PyTorch; the torch.no_grad() block below is
            # what disables gradient tracking in current versions.
            images = Variable(images.cuda(0), volatile=True)
            #labels = Variable(labels.cuda(0), volatile=True)
        else:
            images = Variable(images, volatile=True)
            #labels = Variable(labels, volatile=True)


        with torch.no_grad():
            outputs = netG_model(images)
            outputs = outputs.cpu()
            out_img = outputs.data[0]
            #print(np.unique(out_img), 'out')
            if m_uniform != '0':
                pred = encode_img_ids(out_img)
            else:
                pred = classify(outputs.cpu().data[0], m=m_uniform)  # it is really slow
            #pred = trans_classe(out_img)
            gt = encode_ĺbl_ids(labels[0].numpy(), m=m_uniform)  #labels.data.cpu().numpy()
            ##gt = classify(labels[0], img=False)
        if args.save:
            save_val(decode_ids(pred, m=m_uniform), decode_ids(gt, m=m_uniform), val_dir, "img_{}.png".format(i), args.alpha)
            #print('saving ...', np.unique(pred), np.unique(gt))

        for gt_, pred_ in zip(gt, pred):
            gts.append(gt_)
            preds.append(pred_)

    val_list = np.array([[gts, preds]])
    np.save("{}/validations_tg_pred_{}".format(val_dir, args.rotated), val_list)
    score, class_iou = scores(gts, preds, n_class=args.n_classes)

    for k, v in score.items():
        print(k, v)

    for i in range(args.n_classes):
        print(i, class_iou[i])

#################
    np.save("{}/parameters_{}".format(val_dir, args.rotated), args)
    np.save("{}/result.txt".format(val_dir), np.array([score, class_iou]))    
Example No. 9
test = pca.transform(test)

# # Obtain best parameters
# num_processor = -1
# util.obtain_parameters('RF', train, label, num_processor)
# util.obtain_parameters('ET', train, label, num_processor)
# util.obtain_parameters('SVM', train, label, num_processor)
# util.obtain_parameters('KNN', train, label, num_processor)
# util.obtain_parameters('LR', train, label, num_processor)

# Training classifier

'''
classifier abbreviations:
RF - Random Forest
ET - Extra Trees
SVM - Support Vector Machine
KNN - K Nearest Neighbors
LR - Logistic Regression
'''

classifier_name = 'SVM'
print('Training and prediction with %s classifier...' % classifier_name)
prediction = util.classify(classifier_name, train, label, test)

# Exporting solution
index = list(range(1, len(test) + 1))
print('Writing data to CSV file...')
df_prediction = pd.DataFrame(data = prediction, index = index, columns = ['Solution'])
df_prediction_csv = df_prediction.to_csv('prediction_%s.csv' % classifier_name, index_label = ["Id"])
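
# util.classify and util.obtain_parameters are imported from a helper module
# that is not shown here. A plausible sketch of a dispatcher keyed on the
# abbreviations listed above, built on scikit-learn (an assumption -- the real
# util module may differ), is:
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression

_CLASSIFIERS = {
    'RF': RandomForestClassifier,
    'ET': ExtraTreesClassifier,
    'SVM': SVC,
    'KNN': KNeighborsClassifier,
    'LR': LogisticRegression,
}

def classify_sketch(name, train, label, test, **params):
    """Fit the classifier abbreviated by name on (train, label), predict test."""
    model = _CLASSIFIERS[name](**params)
    model.fit(train, label)
    return model.predict(test)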
Example No. 10
    def test04(self):
        self.kb = KnowledgeBase()
        file = 'dataFiles/inDepthFactsRules.txt'
        data = tokenize_file(file)

        classify(self.kb, data)

        # f1 was inferred from a fact and rule in the knowledge base
        f1 = Fact(['isa', 'D', 'bottomBlock'])
        shouldBeTrue = False
        if f1 in self.kb.facts:
            shouldBeTrue = True
            index = self.kb.facts.index(f1)
        self.assertTrue(shouldBeTrue)
        # check to see if the inferred fact (f1) has antecedents
        actual_fact = self.kb.facts[index]
        j1 = actual_fact.antecedent[0].fact
        j2 = actual_fact.antecedent[0].rule
        shouldBeTrue = False
        f = Fact(['ON', 'D', 'TABLE'])
        r = Rule([['ON', '?x', 'TABLE']], ['isa', '?x', 'bottomBlock'])

        if f == j1 and r == j2:
            shouldBeTrue = True
        self.assertTrue(shouldBeTrue)

        # check to see if the antecedents support the given fact (f1)
        support1 = j1.consequent[1]
        support2 = j2.consequent[0]
        shouldBeTrue = False
        #both support1 and support2 should point to the original inferred fact (ON D TABLE)
        if f1 == support1 and f1 == support2:
            shouldBeTrue = True
        self.assertTrue(shouldBeTrue)

        # Fact(['ON', 'D', 'TABLE']) also supports another fact/rule!
        # Since it satisfies the first part of the rule:
        # (rule (ON ?x TABLE)
        #  (rule (ON ?y ?x)
        #    (rule (ON ?z ?y) (assert! '(3-Tower ,?x ,?y ,?z)))))
        # it will assert a new rule with the appropriate new bindings set
        # expected:
        # (rule (ON ?y D)
        #   (rule (ON ?z ?y)
        #     (assert! '(3-Tower D ,?y ,?z))))
        support3 = j1.consequent[0]
        r = Rule([['ON', '?y', 'D'], ['ON', '?z', '?y']],
                 ['3-Tower', 'D', '?y', '?z'])

        # Inferred facts and rules can infer more facts and rules ...
        f1 = Fact(['thereIsOneBlockBetween', 'C', 'TABLE'])
        f2 = Fact(['thereIsOneBlockBetween', 'B', 'D'])
        f3 = Fact(['thereIsOneBlockBetween', 'A', 'C'])
        shouldBeTrue = False
        if f1 in self.kb.facts and f2 in self.kb.facts and f3 in self.kb.facts:
            shouldBeTrue = True
        self.assertTrue(shouldBeTrue)

        f4 = Fact(['thereAreThreeBlocksBetween', 'A', 'TABLE'])
        shouldBeTrue = False
        if f4 in self.kb.facts:
            shouldBeTrue = True
        self.assertTrue(shouldBeTrue)
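
# The comment block above describes how matching (ON D TABLE) against the outer
# antecedent of the nested rule binds ?x to D and asserts a specialized rule.
# The KnowledgeBase internals are not shown here, but the substitution step it
# describes can be sketched directly:
def substitute(statement, bindings):
    """Replace bound variables in a statement, leaving unbound ?vars in place."""
    return [bindings.get(term, term) for term in statement]

bindings = {'?x': 'D'}  # from matching (ON ?x TABLE) against (ON D TABLE)
remaining_antecedents = [['ON', '?y', '?x'], ['ON', '?z', '?y']]
consequent = ['3-Tower', '?x', '?y', '?z']

new_rule = ([substitute(a, bindings) for a in remaining_antecedents],
            substitute(consequent, bindings))
print(new_rule)
# ([['ON', '?y', 'D'], ['ON', '?z', '?y']], ['3-Tower', 'D', '?y', '?z'])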
Example No. 11
    def test06(self):
        self.kb = KnowledgeBase()
        file = 'dataFiles/data_for_suggestions.txt'
        data = tokenize_file(file)
        classify(self.kb, data)

        self.sb = SuggestionBase(self.kb)
        file = "dataFiles/suggestions.txt"
        #ensure that the txt file has each subgoal on a separate line
        data = tokenize_suggestion_file(file)
        classify_suggestions(self.sb, data)

        original_suggestions = copy.deepcopy(self.sb)

        # This question is the first test. The suggestion engine checks whether
        # its subgoals are themselves suggestions; if they are not, it falls back
        # to fetching from facts. Since a fact satisfies this suggestion, the
        # answer is returned directly.
        q = Question('question0', 'humanPopulation ?p',
                     "what is the human population?")
        ans = self.sb.evaluate(q)
        self.assertEqual(float(ans), 7000000000.0)

        # This question tests the overall functionality of "evaluate()"
        self.sb = original_suggestions  # restore original version of sb
        q = Question('question1', 'CaloriesIn moon ?count',
                     'How many calories in object?')
        ans = self.sb.evaluate(q)
        ans = float(ans)

        # Checking the evaluation process...

        # "CaloriesIn" will call the subgoals "volumeOfObject" and "caloriesInCubicMeter"
        # "volumeOfObject" is another suggestion that retrieves a radius then evaluates
        # the volume of sphere formula: 4.187 * (r^3)
        # the result is then put into ?vol, hence ?vol is replaced with the actual value:
        eval1 = [
            '2.1943324029411e+19', 'TimesFn', '4.187', 'TimesFn', '?radius',
            '?radius', '?radius'
        ]
        self.assertEqual(self.sb.eval[0], eval1)

        # Once "volumeOfObject moon ?vol" -> ?vol = 2.1943324029411e+19,
        # the next subgoal is checked. "caloriesInCubicMeter" is next.
        # this is another defined suggestion in the "suggestions.txt" file.
        # This suggestion has its own subgoals:
        # 1. (madeOf ?obj greencheese) - this will return (?obj = moon)
        # 2. (caloriesPerKilogram greencheese ?cpk) - this will search for a suggestion
        # named "caloriesPerKilogram". Since none is found, it will then search
        # facts (from data_for_suggestions.txt)
        # in the knowledgebase to "ask()" if a fact exists that matches the description.
        # This succeeds and returns (?cpk = 4000)
        # Then it evaluates the next subgoal, "densityOfGreenCheese" which will
        # go through the same logic as the previous subgoal. It will return (?dc = 947)
        # The suggestion multiplies ?cpk and ?dc -> 4000 * 947 = 3788000.
        eval2 = ['3788000.0', 'TimesFn', '?cpk', '?dc']
        self.assertEqual(self.sb.eval[1], eval2)

        # Now that all of the "CaloriesIn" subgoals have been evaluated,
        # with ?vol = 2.19...e+19 and ?cal = 3788000, we can move on to its
        # result-step. According to the suggestions txt file, ?vol and ?cal
        # get multiplied to return the final ?count. 2.19e+19 * 3788000 =
        # 8.31e+25
        eval3 = [
            '8.312131142340886e+25', 'TimesFn', '2.1943324029411e+19',
            '3788000.0'
        ]
        self.assertEqual(self.sb.eval[2], eval3)

        # double checking to make sure the answer is roughly equivalent to
        # the answer given by the FIRE reasoning engine.
        withinRange = False
        if ans > 1e25 and ans < 1e26:
            withinRange = True
        self.assertTrue(withinRange)
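
# The arithmetic walked through in the comments above can be checked directly.
# The moon's radius fact lives in data_for_suggestions.txt (not shown), so the
# volume here is taken from the expected eval value rather than recomputed:
volume = 2.1943324029411e+19       # ?vol from "volumeOfObject moon ?vol"
calories_per_m3 = 4000.0 * 947     # ?cpk * ?dc -> 3788000.0
total = volume * calories_per_m3   # ?vol * ?cal
print(total)                       # ~8.31e+25
assert 1e25 < total < 1e26         # same range check as the test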
Example No. 12
# D is now the transpose, so training points as columns
D = X.T
A = R.dot(D)

print('done loading finally')

# get alphas for each of the 32 images of individual 33
# via basis pursuit

λ = .001
class_ests = []
alphas = []
for row in Z:
    alpha = _get_alpha(row, R, A, λ)
    alphas.append(alpha)
    class_est = classify(D, alpha, row, lab)
    class_ests.append(class_est)

correct = sum((x == 33 for x in class_ests))

from concentration import concentration

z2alphas = [_get_alpha(row, R, A, .001) for row in Z2]

conc = [concentration(alpha, lab) for alpha in z2alphas]

outliers_recognized = [c > .5 for c in conc]

print("the following outliers (individual 38) were (erronously) recognized")
print("(note: these are in order in the directory)")
print([i for i, c in enumerate(outliers_recognized) if c])
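
# _get_alpha, classify and concentration come from modules not shown in this
# snippet. In sparse-representation classification, the usual "classify" step
# keeps only the coefficients belonging to each candidate class and picks the
# class whose reconstruction has the smallest residual; a sketch of that step
# (an assumption about what the real classify does) is:
import numpy as np

def src_classify(D, alpha, y, lab):
    """D: dictionary (features x samples), alpha: sparse code for y,
    y: test sample, lab: class label of each dictionary column."""
    lab = np.asarray(lab)
    residuals = {}
    for c in np.unique(lab):
        a_c = np.where(lab == c, alpha, 0.0)   # keep class-c coefficients only
        residuals[c] = np.linalg.norm(y - D @ a_c)
    return min(residuals, key=residuals.get)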
Example No. 13
                    rgb=rgb,
                    rotated=opt.rotated)
    img = transform(img1)
    #print(np.unique(img))
    input = Variable(img, volatile=True).view(1, -1, opt.img_size[1],
                                              opt.img_size[0])

    if opt.cuda:
        netG = netG.cuda()
        input = input.cuda()

    out = netG(input)
    out = out.cpu()
    out_img = out.data[0]
    if not os.path.exists(os.path.join("result", opt.dataset)):
        os.mkdir(os.path.join("result", opt.dataset))
    save_img(out_img, "result/{}/t_{}".format(opt.dataset, image_name),
             opt.img_size)
    save_img_np(rm_2channel(img1, sz=opt.img_size),
                "result/{}/o_{}".format(opt.dataset, image_name), opt.img_size)

    # This classification (classify, encode_img_ids) will be updated to run on the GPU
    if opt.m == '0':
        save_img_np(decode_ids(classify(out_img, m=opt.m), m=opt.m),
                    "result/{}/{}".format(opt.dataset,
                                          image_name), opt.img_size)  #quadtree
    else:
        save_img_np(decode_ids(encode_img_ids(out_img)),
                    "result/{}/{}".format(opt.dataset,
                                          image_name), opt.img_size)
Example No. 14
def MinMax(dataSet, thita=1 / 2, Z1=0):
    # Same pipeline as NN above: generate cluster centers with genClusters,
    # then assign each point with util.classify.
    zs, zsi = genClusters(dataSet, thita, Z1)
    clusteri, clusterz = util.classify(dataSet, zs, zsi)
    return clusteri, clusterz