def __init__(self, Q):
    """Build the point/arc lookup tables and empty output buffers.

    Each table is sized at ten times Q to keep the load factor low.
    """
    # NOTE(review): `hashtable` takes a size argument here — presumably a
    # project-local fixed-capacity table; confirm against its definition.
    self.coincidences = hashtable(Q * 10)
    self.arcsByPoint = hashtable(Q * 10)
    self.pointsByPoint = hashtable(Q * 10)
    self.arcs = []
    self.forwardOut = []
    self.backwardOut = []
def __init__(self, Q):
    """Set up coincidence/arc/point lookup tables plus output lists."""
    # All three tables share the same capacity (10x Q keeps collisions rare).
    capacity = Q * 10
    self.coincidences = hashtable(capacity)
    self.arcsByPoint = hashtable(capacity)
    self.pointsByPoint = hashtable(capacity)
    self.arcs = []
    self.forwardOut = []
    self.backwardOut = []
def super_resolution_train(mat, Qangle, Qstrenth, Qcoherence):
    """Learn RAISR-style per-bucket regression filters from one image.

    For every 11x11 patch of a blurred copy of the image, the patch is
    hashed into a (angle, strength, coherence) bucket and one of four
    pixel-phase classes, and normal equations Q h = V are accumulated.
    Returns h of shape (Qangle*Qstrenth*Qcoherence, 4, 121).
    """
    n_buckets = Qangle * Qstrenth * Qcoherence
    Q = np.zeros((n_buckets, 4, 11 * 11, 11 * 11))
    V = np.zeros((n_buckets, 4, 11 * 11, 1))
    h = np.zeros((n_buckets, 4, 11 * 11))
    # NOTE(review): YCrCb channel 2 is Cb, not luminance; the inference path
    # indexes channel 2 as well, so this is at least self-consistent.
    mat = cv2.cvtColor(mat, cv2.COLOR_BGR2YCrCb)[:, :, 2]
    mat = cv2.normalize(mat.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
    HR = mat
    # Should use a genuine low-resolution image; a Gaussian blur stands in
    # for it here (original author's admitted shortcut).
    LR = cv2.GaussianBlur(HR, (0, 0), 2)
    for xP in range(5, LR.shape[0] - 6):
        for yP in range(5, LR.shape[1] - 6):
            patch = LR[xP - 5:xP + 6, yP - 5:yP + 6]
            angle, strenth, coherence = hashtable(patch, Qangle, Qstrenth, Qcoherence)
            # NOTE(review): the 9 and 3 strides assume Qstrenth == Qcoherence == 3.
            j = angle * 9 + strenth * 3 + coherence
            A = patch.reshape(1, -1)
            b = HR[xP][yP]
            t = xP % 2 * 2 + yP % 2
            Q[j, t] += A * A.T  # broadcasting yields the 121x121 outer product
            V[j, t] += A.T * b
    # Solve each bucket's normal equations with conjugate gradients.
    for t in range(4):
        for j in range(n_buckets):
            h[j, t] = cg(Q[j, t], V[j, t])[0]
    return h
def testhash(data, hashtype=None):
    """Smoke-test open-addressed insert/search/delete, printing each result."""
    h = hashtable.hashtable()
    if hashtype is not None:
        h.opentype = hashtype
    for item in data:
        h.insert(item, random.randint(0, 100))
    rule = "=" * 30
    print(rule)
    for entry in h:
        print(entry)
    print(rule)
    print(h.search(data[3]))
    print(h.search(200))  # expected miss
    print(h[data[7]])
    h[101] = 45
    print(h[1])
    print(h.search(101))
    print(rule)
    print(h.delete(data[3]))
    print(h.delete(200))  # deleting a missing key
    print(rule)
    h.print()
    print(rule)
def rehash(self):
    """Grow the table by rehashMultiplyer and re-insert every stored key."""
    self.rehashes += 1
    old_table = self.hashtable
    self.tableSize = int(self.tableSize * self.rehashMultiplyer)
    self.hashtable = hashtable.hashtable(self.tableSize)
    # Re-insert only genuine key objects, skipping empty/tombstone slots.
    for slot in old_table.table:
        if isinstance(slot, key.key):
            self.rehashProbing(slot, False)
def __init__(self, tab=None, expander=taquin_expander_simple, selector=taquin_selector_simple):
    """Set up a taquin (sliding-puzzle) search instance.

    tab: the board as a list of n rows of n ints; it is flattened into the
    first element of the state tuple self.e = (board, cost).
    """
    self.expander = expander
    self.selector = selector
    self.success = False
    self.n = len(tab)
    self.closed = hashtable()
    # Flatten the rows into one list; state carries (board, cost=0).
    self.e = (reduce(lambda x, y: x + y, tab, []), 0)
    self.opened = hashtable()
    self.opened.append(self.e)
    # Count the leading run of tiles already in place (tile i+1 at index i).
    # BUG FIX: the original tested self.e[i] — indexing the (board, cost)
    # tuple itself — so the comparison always failed and goods stayed 0.
    # The board cell is self.e[0][i].
    self.goods = 0
    for i in xrange(self.n ** 2):
        if self.e[0][i] == i + 1:
            self.goods += 1
        else:
            break
def __init__(self, tab=None, expander=taquin_expander_simple, selector=taquin_selector_simple):
    """Initialize a sliding-puzzle search over an n x n board.

    tab holds n rows of n ints; the flattened board and a zero cost form
    the initial state self.e.
    """
    self.expander = expander
    self.selector = selector
    self.success = False
    self.n = len(tab)
    self.closed = hashtable()
    self.e = (reduce(lambda x, y: x + y, tab, []), 0)
    self.opened = hashtable()
    self.opened.append(self.e)
    self.goods = 0
    # BUG FIX: original read self.e[i] (the state tuple) rather than the
    # flattened board self.e[0][i], so goods was always left at 0.
    # goods = length of the prefix of tiles already in their home position.
    for i in xrange(self.n ** 2):
        if self.e[0][i] != i + 1:
            break
        self.goods += 1
def __init__(self, tableSize, valueC=50, rehashMultiplyer=2):
    """Wrap a hashtable and track probing / collision statistics."""
    # Core table and growth policy.
    self.tableSize = tableSize
    self.hashtable = hashtable.hashtable(tableSize)
    self.valueC = valueC
    self.rehashMultiplyer = rehashMultiplyer
    # Statistics counters, all starting from zero.
    self.ldown = 0
    self.lup = 0
    self.numProbes = 0
    self.rehashes = 0
    self.maxCollisionChain = 0
    self.collisionCounter = 0
    self.curCollisionChain = 0
def test_plus_duplicate(self):
    """`+=` with fully overlapping keys keeps size and yields each key once."""
    expected = set()
    table2 = hashtable()
    for i in range(1, self.CASES + 1):
        self.table.add(chr(i), i)
        table2.add(chr(i), self.CASES - i)  # same key, different value
        expected.add(chr(i))
    self.table += table2
    # Every iterated key must be one of the originals, seen exactly once.
    for key in self.table:
        self.assertTrue(key in expected)
        expected.remove(key)
    self.assertEqual(self.CASES, self.table.size)
    self.assertEqual(0, len(expected))
def testchainhash(data):
    """Smoke-test chained insert/search/delete, printing each result."""
    h = hashtable.hashtable()
    for item in data:
        h.chainedinsert(item, random.randint(0, 100))
    rule = "=" * 30
    print(rule)
    for entry in h:
        print(entry)
    print(rule)
    print(h.chainedsearch(data[3]))
    print(h.chainedsearch(200))  # expected miss
    print(h[data[7]])
    h[101] = 4
    print(h[data[7]])
    print(h.chainedsearch(101))
    print(rule)
    print(h.chaineddelete(data[3]))
    print(h.chaineddelete(200))  # deleting a missing key
    print(rule)
    h.print()
    print(rule)
#! /usr/bin/env python import hashlib import hashtable def calc_hash(word,hash_alg='md5'): if hash_alg not in hashlib.algorithms: print ("%s isn't exists in 'hashlib'!" % hash_alg) return None hash_obj = hashlib.new(hash_alg) hash_obj.update(word.encode()) return hash_obj.hexdigest() htable = hashtable.hashtable('htable-main.db') print try: htable.createtable(hashlib.algorithms) except: pass print try: with open('wordlist.txt','r') as file: line = file.readline() i = 1 while line != None and i < 10: if line == '\n' or line == ' ': line = file.readline()
def __init__(self, Q):
    """Create point/arc lookup tables (each sized 10x Q) and reset length."""
    # NOTE(review): `hashtable` takes a capacity argument — presumably a
    # project-local fixed-size table; confirm against its definition.
    self.coincidences = hashtable(Q * 10)
    self.arcsByPoint = hashtable(Q * 10)
    self.pointsByPoint = hashtable(Q * 10)
    self.arcs = []
    self.length = 0
elif width <= 2000 and height <= 2000: fx = 1.3 fy = 1.3 else: fx = 1 fy = 1 mat = cv2.imread(painting_name) [width, height, channel] = mat.shape h = np.load("lowR2.npy") mat = cv2.cvtColor(mat, cv2.COLOR_BGR2YCrCb)[:, :, 2] LR = cv2.resize(mat, (0, 0), fx=fx, fy=fy) LRDirect = np.zeros((LR.shape[0], LR.shape[1])) for xP in range(5, LR.shape[0] - 6): for yP in range(5, LR.shape[1] - 6): patch = LR[xP - 5:xP + 6, yP - 5:yP + 6] [angle, strenth, coherence] = hashtable(patch, Qangle, Qstrenth, Qcoherence) j = angle * 9 + strenth * 3 + coherence A = patch.reshape(1, -1) t = xP % 2 * 2 + yP % 2 hh = np.matrix(h[j, t]) LRDirect[xP][yP] = hh * A.T print("Test is off") mat = cv2.imread(painting_name) mat = cv2.cvtColor(mat, cv2.COLOR_BGR2YCrCb) LR = cv2.resize(mat, (0, 0), fx=fx, fy=fy, interpolation=cv2.INTER_LINEAR) LRDirectImage = LR LRDirectImage[:, :, 2] = LRDirect A = cv2.cvtColor(LRDirectImage, cv2.COLOR_YCrCb2RGB) im = Image.fromarray(A) im.save(painting_name)
import hashtable table = hashtable.hashtable(10) links = ["https://reddit.com/r/futureporn" , "https://www.reddit.com/r/futureporn/top/?sort=top&t=all", "https://www.reddit.com/r/futureporn/top/?sort=top&t=all&count=25&after=t3_1wdbte" , "https://www.reddit.com/r/ImaginaryBestOf/top/?sort=top&t=all" , "https://www.reddit.com/r/ImaginaryBestOf/top/?sort=top&t=all&count=25&after=t3_3tk957" , "https://www.reddit.com/r/ImaginaryWinterscapes/top/?sort=top&t=all" , "https://www.reddit.com/user/Lol33ta/m/imaginarycharacters/top/?sort=top&t=all" , "https://www.reddit.com/user/Lol33ta/m/imaginarylandscapes/top/?sort=top&t=all"] for link in links: print table.visited(link), link print table.visited("https://reddit.com/r/futureporn") , "https://reddit.com/r/futureporn" table.printtable() table.grow() print "-------------new table-----------------------" table.printtable()
import csv
from package import package
from hashtable import hashtable
import datetime
from datetime import time
import calculator

# Variables
# NOTE(review): this rebinds `hashtable` from the imported class to an
# instance, shadowing the import for the rest of the module — kept as-is
# because later (unseen) code may rely on the name.
hashtable = hashtable()
addressList = []
distanceList = []

# Read in package data from csv; packages are keyed by zero-based id.
with open('data/packages.csv') as packageFile:
    reader = csv.reader(packageFile, delimiter=',')
    for row in reader:
        timestamp = time(hour=0, minute=0, second=0, microsecond=0)
        temp = package(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], timestamp)
        hashtable.insert(int(temp.id) - 1, temp)

# Read in distance data from csv
with open('data/distances_table.csv') as distanceFile:
    reader = csv.reader(distanceFile, delimiter=',')
    for row in reader:
        distanceList.append(row)

# Read in address data from csv
with open('data/addresses.csv') as addressFile:
    reader = csv.reader(addressFile, delimiter=',')
# Ensure the picture directory exists (tolerate a pre-existing directory).
picdirs = [PICS_PATH]
for path in picdirs:
    try:
        os.makedirs(path)
    except OSError:
        if not os.path.isdir(path):
            raise

# Load the persisted visited-table if present, else start a fresh one.
# BUG FIX: the existence check used BASE_PATH + "table.p" but pickle.load
# opened bare "table.p" (cwd-relative) — the two must refer to the same
# file. Also close the file via a context manager instead of leaking it.
if os.path.exists(BASE_PATH + "table.p"):
    with open(BASE_PATH + "table.p", "rb") as table_file:
        table = pickle.load(table_file)
else:
    table = hashtable.hashtable(64)

url = [
    "https://reddit.com/r/futureporn",
    "https://www.reddit.com/r/imaginarywinterscapes",
    "https://www.reddit.com/r/ImaginaryMindscapes",
    "https://www.reddit.com/r/ImaginaryWorlds",
]
random.shuffle(url)
count = 0
list = []  # NOTE(review): shadows the builtin `list`; kept — later code may use it
while (count < numberofpics):
    # Refill the candidate pool when it cannot cover the remaining quota.
    if len(list) < numberofpics - count:
        list = extract_pic_links(url, table, numberofpics * sources)
    random.shuffle(list)
    sublist = []
def main(): ht = h.hashtable(10) print ht.hash(114)
def setUp(self):
    """Give every test a fresh, empty hashtable."""
    self.table = hashtable()