Example #1
0
def calc_image_stats(pos_db, neg_db, samples=None):
    """Estimate per-pixel mean and std of images drawn from the databases.

    Builds a MixedRetriever over the positive and negative databases and
    accumulates running per-pixel statistics over sampled image pairs.

    Args:
        pos_db: positive-example database passed to Retriever.
        neg_db: negative-example database passed to Retriever.
        samples: number of sample steps to take; defaults to the retriever
            length (i.e. step size 1).

    Returns:
        (mean, std): two (192, 192, 3) numpy arrays.
        NOTE(review): assumes retrieved images are 192x192x3 — confirm
        against the Retriever implementation.
    """
    ret = MixedRetriever(Retriever(pos_db), Retriever(neg_db))
    if samples is None:
        count = ret.get_length()
    else:
        count = samples

    ratio = 1. / 2
    sess = tf.Session()
    mean = np.zeros((192, 192, 3))
    std = np.zeros((192, 192, 3))
    k = 1
    n = 200
    # Calc mean: incremental re-weighting of the running mean with each
    # batch of n images (n/2 pairs -> r and l stacks).
    print("Calculating mean")
    for i in xrange(0, ret.get_length(), ret.get_length() // count):
        r, l, g = ret.get_random_pairs(sess, n / 2, ratio)
        mean = mean * k / (k + n) + (np.sum(r, axis=0) +
                                     np.sum(l, axis=0)) / (k + n)
        k += 1
    #cv2.imshow("mean", mean)
    #cv2.waitKey(0)
    k = 1
    # Calc std with the same incremental weighting scheme.
    mean_repeat = np.expand_dims(mean, 0).repeat(n, 0)
    print("Calculating standard deviation")
    for i in xrange(0, ret.get_length(), ret.get_length() // count):
        r, l, g = ret.get_random_pairs(sess, n / 2, ratio)
        arr = np.concatenate((r, l), axis=0)
        a = (np.sum(np.square(arr - mean_repeat), axis=0)) / (k + n)
        std = std * k / (k + n) + a / (k + n)
        # BUG FIX: k was never advanced here, unlike the mean loop above,
        # so every batch was weighted as if it were the first.
        k += 1
    #cv2.imshow("std", std)
    #cv2.waitKey(0)

    sess.close()  # release the TF session; it was previously leaked
    return mean, std
Example #2
0
def main():
  """Interactively display image pairs from the training database.

  Shows pairs side by side in a resizable window; any key advances to the
  next pair, ESC (27) exits early.
  """
  dbs = load_dbs(sys.argv[1], False)
  TRAIN_DBS, VAL_DBS, TEST_DBS, TRAIN_N_DBS, VAL_N_DBS, TEST_N_DBS = dbs
  ret = Retriever(TRAIN_DBS)
  sess = tf.Session()
  cv2.namedWindow("image", cv2.WINDOW_NORMAL)
  cv2.resizeWindow("image", 600, 1200)
  l = 1000
  div = ret.get_length() / l
  pairs = []
  for idx in range(l):
    img1, img2 = ret.get_image_pair(idx*div + 1, sess)
    #img1, img2 = ret.get_random_pair(sess)
    # Drop singleton dims and convert to displayable 8-bit images.
    img1 = np.squeeze(img1.astype(np.uint8))
    img2 = np.squeeze(img2.astype(np.uint8))
    # Swap channel order for OpenCV display.
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
    #cv2.cvtColor(img1, cv2.COLOR_RGB2BGR)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
    #cv2.cvtColor(img2, cv2.COLOR_RGB2BGR)

    #cv2.imwrite("image1.png", img1)
    #cv2.imwrite("image2.png", img2)

    side_by_side = np.concatenate((img1, img2), axis=1)
    cv2.imshow("image", side_by_side)
    if cv2.waitKey(0) == 27:  # ESC ends the viewing session
      break
  cv2.destroyWindow("image")
  sess.close()
Example #3
0
def generate_snippet(file_name):
    relevant_list = open('cacm.rel', 'r')
    query_relevant = {}
    for line in relevant_list.readlines():
        words = line.split()
        if query_relevant.has_key(words[0]):
            doc_no = words[2][5:]
            doc_no = str(doc_no)
            doc = 'CACM-' + (4 - len(doc_no)) * '0' + doc_no
            query_relevant[words[0]].append(doc)
        else:
            query_relevant[words[0]] = []
            doc_no = words[2][5:]
            doc = 'CACM-' + (4 - len(doc_no)) * '0' + doc_no
            query_relevant[words[0]].append(doc)

    #Get query_dict from the file cacm.query.txt
    f = open('cacm.query.txt', 'r')
    soup = BeautifulSoup(f.read(), 'html.parser')
    f.close()
    rawquery_dict = {}
    for i in range(64):
        query_no = (soup.find('docno')).text.encode('utf-8')
        (soup.find('docno')).decompose()
        query = (soup.find('doc')).text.encode('utf-8')
        (soup.find('doc')).decompose()

        query_no = query_no.strip(" ")
        rawquery_dict[query_no] = query

    r = Retriever("", "")
    query_dict = {}
    for query_no, raw_query in rawquery_dict.viewitems():
        query_dict[query_no] = r.process_query(raw_query, True)

    print "Enter the query no"
    no = raw_input()
    no = str(no)
    query = query_dict[no]
    print query
    s = SnippetGen(file_name)
    query_results = s.get_queryresults(name='bms25')
    results = query_results[no]
    # pdb.set_trace()
    s.get_snippet(query, results)
Example #4
0
    def __init__(self, car):
        """Set up the rule base, event generator and case retriever.

        Args:
            car: the controlled car object, stored for later use.
        """
        self.car = car
        # Each rule: [id, [condition], [consequence]].
        self.rules = [
            ['1', ['signal:RED'], ['acceleration:N']
             ],  # rule base for signal, stop sign and speed limit
            ['2', ['signal:AMBER'], ['acceleration:N']],
            ['3', ['signal:GREEN'], ['acceleration:Y']],
            ['4', ['sign:STOP'], ['acceleration:N']],
            # BUG FIX: consequence was misspelled 'aceleration:Y', so rule 5
            # could never match the 'acceleration' key used by every other rule.
            ['5', ['speed:Y'], ['acceleration:Y']],
            ['6', ['speed:N'], ['acceleration:N']]
        ]

        self.evtgen = EventGenerator.EventGenerator(
        )  # creating random event generator object
        self.rt = Retriever(
            'casebasedplan.txt')  # Retriever object for case retrieval
        self.exception_event = ExceptionalEvent()
        self.laneChanged = False
Example #5
0
def load_retrievers(dbtype, viewing=False):
    """Build mixed (positive + negative) retrievers for each split.

    Args:
        dbtype: database type selector forwarded to load_dbs.
        viewing: viewing flag forwarded to load_dbs.

    Returns:
        (train_ret, val_ret, test_ret) MixedRetriever triples.
    """
    # INIT DATABASE RETRIEVERS
    (TRAIN_DBS, VAL_DBS, TEST_DBS,
     TRAIN_N_DBS, VAL_N_DBS, TEST_N_DBS) = load_dbs(dbtype, viewing)

    def _mixed(pos_dbs, neg_dbs):
        # Pair one positive and one negative retriever into a mixed source.
        return MixedRetriever(Retriever(pos_dbs), Retriever(neg_dbs))

    train_ret = _mixed(TRAIN_DBS, TRAIN_N_DBS)
    val_ret = _mixed(VAL_DBS, VAL_N_DBS)
    test_ret = _mixed(TEST_DBS, TEST_N_DBS)
    return train_ret, val_ret, test_ret
#Get query_dict from the file cacm.query.txt
f = open('cacm.query.txt','r')
soup = BeautifulSoup(f.read(), 'html.parser')
f.close()
rawquery_dict = {}
for i in range(64):
  query_no = (soup.find('docno')).text.encode('utf-8')
  (soup.find('docno')).decompose()
  query = (soup.find('doc')).text.encode('utf-8')
  (soup.find('doc')).decompose

  query_no = query_no.strip(" ")
  rawquery_dict[query_no] = query

r = Retriever("", "")
query_dict = {}
for query_no, raw_query in rawquery_dict.viewitems():
  query_dict[query_no] = r.process_query(raw_query, True)
 
print "Enter the query no"
no = raw_input()
no = str(no)
query = query_dict[no]
print query
s = SnippetGen()
query_results = s.get_queryresults(name = 'bms25')
results = query_results[no]
# pdb.set_trace()
s.get_snippet(query,results)
Example #7
0
from Retriever import Retriever
R = Retriever()

# pagerank updates for random names
res = R.fetch_objects(size=1000)
print(res.keys())

n_iterations = 10000
d = 0.7  # damping factor
n = float(len(res))  # len(res.keys()) is redundant; a dict has len()

for i in range(n_iterations):
    # for each name...
    for k, v in res.items():

        # calculate pagerank: teleport term plus damped parent contributions
        updated_prob = (1 - d) * 1 / n

        # ...by inspecting its parents
        for parent_node in v.parents:
            # parents with high pageranks but few outbound are best
            updated_prob += d * (parent_node.pagerank) / parent_node.n_children

        # update node's pagerank
        v.update_pagerank(updated_prob)

    # integer division: i % float was fragile (and wrong under Py3 semantics
    # this example otherwise uses)
    if i % (n_iterations // 5) == 0:
        print("iteration", i)

R.put_objects(res)