async def load(channel):
    global gamedictionary
    global moviedictionary
    if channel == gdc:
        mediadict = gamedictionary
    elif channel == mdc:
        mediadict = moviedictionary
    else:
        return
    messageBundle = []
    tasks = []
    # channel.history() yields the newest messages first.
    async for message in channel.history():
        lines = message.content.split("\n")
        if await is_emoji(lines[0][0]):
            messageBundle.append(lines)
        else:
            lines[2] = await handledparam(channel, lines[2])
            messageBundle.append(lines[1:])
            messageBundle.reverse()  # restore chronological order
            descriptor = Descriptor()
            tasks.append(
                asyncio.create_task(Descriptor.load(descriptor, messageBundle)))
            mediadict[lines[0]] = descriptor
            messageBundle = []
    await asyncio.gather(*tasks)
def __init__(self, parent):
    Descriptor.__init__(self, parent)
    self.feature_vectors = self.source.soundfile.array
    self.length = len(self.feature_vectors)
    self.samplerate = self.source.soundfile.samplerate
    Segmentations.Frames(self, self.length, self.samplerate)
def __init__(self, paths, logger, jobSerialNumber, argDict={}):
    self.package = ''  # set by Jobs

    # stuff to do with data sets
    self.datasets = {}

    # init the base class
    Descriptor.__init__(self, paths, logger, jobSerialNumber, argDict)
def __init__(self, parent, hopsize=2200, steps_per_octave=12, reinit=False):
    Descriptor.__init__(self, parent,
                        Default_Preprocessor=spectral.Spectrogram,
                        reinit=reinit)
def __init__(self, parent, reinit=False):
    Descriptor.__init__(self, parent,
                        Default_Preprocessor=spectral.Spectrogram,
                        reinit=reinit)
    spectrum = self.parent_processor.feature_vectors
    self.feature_vectors = Relations.border_fir2d_filter(spectrum,
                                                         filter_x=[1] * 20,
                                                         filter_y=[1] * 40)
def generate_sign_database(self, tr, decs_file_dir, K, depth, total_img):
    # number of non-root nodes in a K-ary tree of the given depth
    L = (K**(depth + 1) - 1) / (K - 1) - 1
    self.sign_database = np.empty(shape=(total_img, L))
    for i in range(0, total_img):
        # load the stored descriptor for image i
        desc = Descriptor()
        desc.load_desc(decs_file_dir, 'desc_' + str(i))
        sign = self.generate_sign(tr, desc.desc, K, depth)
        print sign[0:30]
        self.sign_database[i, :] = sign
def __init__(self, parent, N=10000):
    Descriptor.__init__(self, parent, Default_Preprocessor=Audio)
    x = self.parent_processor.feature_vectors
    self.x_ms = x_ms = zeros((len(x) / N))
    mx = 1.0 * max(x)
    for i in range(0, len(x) / N):
        a = i * N
        b = a + N
        r = x[a:b] / mx
        x_ms[i] = sum(r * r)
    self.feature_vectors = (1 / sqrt(N)) * sqrt(x_ms)
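For reference, the loop above computes per-block RMS energy: each output value is sqrt(mean(r**2)) over one non-overlapping length-N block of the peak-normalised signal. A minimal modern NumPy sketch of the same computation; the name block_rms is illustrative, not from the original code:

import numpy as np

def block_rms(x, N=10000):
    # Peak-normalise, chop into non-overlapping length-N blocks,
    # and take the RMS of each block (matching the descriptor above).
    x = np.asarray(x, dtype=float)
    x = x / x.max()
    nblocks = len(x) // N
    blocks = x[:nblocks * N].reshape(nblocks, N)
    return np.sqrt((blocks ** 2).mean(axis=1))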
def __init__(self, parent, framesize=None, hopsize=100,
             window_function=signal.hanning,
             number_of_vectors_used=15,
             reinit=False):
    Descriptor.__init__(self, parent,
                        # Default_Preprocessor = Cepstrogram,
                        Default_Preprocessor=Spectrogram,
                        reinit=reinit)
    if not framesize:
        lp = len(self.parent_processor)
        logfs = min(int(log2(lp / 32)), 10)
        framesize = pow(2, logfs)
    if hopsize > framesize / 4:
        hopsize = framesize / 4
    self._set_samplerate(hopsize)
    self.hopsize = hopsize
    self.framesize = framesize
    print "EOF..."
    if len(self.parent_processor) < framesize:
        self._delete_from_parents()
        raise UnderflowError("input is shorter than one analysis frame; "
                             "aborting to avoid a segfault downstream")
    self.svd_fft_fft(self.parent_processor.feature_vectors,
                     framesize=framesize,
                     hopsize=hopsize,
                     window_function=window_function,
                     number_of_vectors_used=number_of_vectors_used)
    z, lambdas, EOFs = svdeofs.svdeofs(self.feature_vectors)
    self.feature_vectors = z[:, :number_of_vectors_used]
    # self.feature_vectors, self.lambdas = svd_eofs(self.feature_vectors,
    #     number_of_vectors_to_keep = 15)
    Segmentations.Frames(self, len(self.feature_vectors),
                         self.parent_processor.samplerate,
                         framesize, hopsize,
                         window_function=window_function)
def __init__(self, parent, reinit=False):
    Descriptor.__init__(self, parent,
                        Default_Preprocessor=spectral.Spectrogram,
                        reinit=reinit)
    spec = self.parent_processor.feature_vectors  # [3000:5000]
    a = signal.sepfir2d(diff(transpose(spec)), [1], [.1] * 10)
    b = where(a > 10, a, 0.)
    c = sum(b)
    d = where(c > 100.0, c, 0.)
    self.feature_vectors = d
def __init__(self, paths, logger, jobSerialNumber, argDict={}):
    Descriptor.__init__(self, paths, jobSerialNumber, logger)
    if self.mode != 'Dummy':
        self.mode = 'Watcher'
    # a list of worker job minders
    self.confederation = []
    self.package = paths.package
    self.__dict__.update(argDict)
    self.name = self.jobGroup
    self.fixName()
    self.setPathExtensions()
    self.setRunPath(self.pathExtensions)
class AtomTypeDescriptor(object):

    def __init__(self, carbons, oxygens, nitrogens, target):
        coordinate = []
        self.__target = target
        self.__carbon = Descriptor(carbons)
        self.__oxygen = Descriptor(oxygens)
        self.__nitrogen = Descriptor(nitrogens)
        coordinate.extend(self.__carbon.get_coordinate())
        coordinate.extend(self.__oxygen.get_coordinate())
        coordinate.extend(self.__nitrogen.get_coordinate())
        self.__coordinate = tuple(coordinate)

    def get_coordinate(self):
        return self.__coordinate

    def get_target(self):
        return self.__target
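A hypothetical usage sketch; the real shape of the carbons/oxygens/nitrogens arguments depends on what this Descriptor class expects, so the values below are made up:

# Illustrative values only; the Descriptor argument format is an assumption.
atd = AtomTypeDescriptor(carbons=[6, 6], oxygens=[8], nitrogens=[7], target=1)
features = atd.get_coordinate()  # carbon + oxygen + nitrogen coordinates concatenated into one tuple
label = atd.get_target()         # 1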
def __init__(self, paths, logger, jobSerialNumber, argDict={}):
    self.datasets = {}
    self.displayClass = []
    self.displayProcess = []
    self.displayComponent = []

    # init the base class
    Descriptor.__init__(self, paths, logger, jobSerialNumber)

    # update with values from the package config file:
    # jobGroup, jobTransformCmd, jobTransformJobName
    self.name = argDict.pop('jobTransformJobName')
    self.jobTransformCmd = argDict.pop('jobTransformCmd')
    self.__dict__.update(argDict)

    self.fixName()
    self.setPathExtensions()
    self.setRunPath(self.pathExtensions)
def __str__(self): "Converts self to a string" s = Descriptor.__str__(self) s += 'package: %s\n' % str(self.package) s += 'Descriptor hasData: %s\n' % str(self.hasData()) for datasetType in self.datasets.keys(): for dataset in self.datasets[datasetType]: s += 'Dataset type: ' + str( datasetType) + ', dataset physical name: ' + str( dataset.name) + '\n' return s
def __init__(self, parent, hopsize=110, framesize=None,
             keep_bands_until=0,
             window_function=signal.hanning,
             Default_Preprocessor=audio.Audio,
             show_progressbar=True,
             reinit=False):
    Descriptor.__init__(self, parent,
                        Default_Preprocessor=Default_Preprocessor,
                        reinit=reinit)
    self._set_samplerate(hopsize)
    if not framesize:
        lp = len(self.parent_processor)
        logfs = min(int(log2(lp / 32)), 8)
        framesize = pow(2, logfs)
    self.framesize = framesize
    self.hopsize = hopsize
    self.feature_vectors = calculate_spectrogram(
        self.parent_processor.feature_vectors,
        framesize=framesize,
        hopsize=hopsize,
        keep_bands_until=keep_bands_until,
        window_function=window_function,
        show_progressbar=show_progressbar)
    self.segmentation = Segmentations.Frames(self, len(self.feature_vectors),
                                             self.parent_processor.samplerate,
                                             framesize=framesize,
                                             hopsize=hopsize,
                                             window_function=window_function)
def __init__(self, parent, hopsize=2200, steps_per_octave=12, reinit=False):
    Descriptor.__init__(self, parent,
                        Default_Preprocessor=audio.Audio,
                        reinit=reinit)
    a_sgn = self.parent_processor.feature_vectors
    Nyq = self.parent_processor.samplerate / 2.
    l = int(len(a_sgn) / hopsize)
    chroma = zeros((12, l), typecode='f') * 1.0
    self.bands = []
    # MIDI note number to frequency in Hz
    note_to_freq = lambda n: pow(2, (n - 69) / 12.) * 440
    for n in range(36, 36 + 12):
        f1 = note_to_freq(n)
        f2 = note_to_freq(n + 1)
        print f1, f2
        band = bandpass_filter(a_sgn, f1, f2, Nyq)
        band = log(abs(band) + 1)
        print len(band), l
        band = reshape(band[:l * hopsize], (l, hopsize))
        print shape(band)
        print n % 12
        band = sum(band, 1)
        self.bands.append(band)
        chroma[n % 12] += band
    chroma = transpose(chroma) / (sum(chroma, 1) + 1)
    self.feature_vectors = chroma
def descriptor_generator(self):
    # Walk the entire scene and create a descriptor.
    # Note: this currently does not work with scenes exported via the batch option.
    descriptor_struct = Descriptor()

    def descriptor_recurse(obj, structure):
        # Recurse over the object, adding relevant children to the structure.
        prefixes = set()
        important_children = []
        for child in obj.children:
            if not child.name.startswith('NMS'):
                continue
            if child.NMSDescriptor_props.proc_prefix != "":
                p = child.NMSDescriptor_props.proc_prefix
                # Normalise the prefix first: the user may or may not have added a
                # leading or trailing underscore, so strip them and add our own.
                prefix = "_{0}_".format(p.strip("_"))
                prefixes.add(prefix)
                # Keep only children with proc info.
                important_children.append(child)
        for prefix in prefixes:
            structure.add_child(prefix)  # adds a Node_List type child object
        # Now recurse over the children with proc info.
        for child in important_children:
            # get_child(...).add_child(...) adds a Node_Data object and returns it.
            node = structure.get_child(
                "_{0}_".format(child.NMSDescriptor_props.proc_prefix.strip("_"))).add_child(child)
            descriptor_recurse(child, node)
            # Rename the object so that it is consistent with the descriptor.
            prefix = child.NMSDescriptor_props.proc_prefix.strip("_")
            stripped_name = child.name[len("NMS_"):].upper()
            if stripped_name.strip('_').upper().startswith(prefix):
                child.NMSNode_props.override_name = "_{0}".format(
                    stripped_name.strip('_').upper())
            else:
                # The name does not already carry the prefix, so prepend it.
                child.NMSNode_props.override_name = "_{0}_{1}".format(
                    prefix, stripped_name.strip('_').upper())

    descriptor_recurse(self.NMSScene, descriptor_struct)
    print(descriptor_struct)
    return descriptor_struct
def __str__(self):
    s = '--------- JobTransformDescriptor ---------\n'
    s += Descriptor.__str__(self)
    return s
def dataForXMLNode(self):
    names = ['displayComponent', 'displayClass', 'displayProcess']
    data = Descriptor.dataForXMLNode(self)
    for n in names:
        data[n] = self.__dict__[n]
    return data
def deferredMinderUpdate(self):
    Descriptor.deferredDataSetUpdate(self)
    self.updateJobTransformCommand()
import argparse
import cv2
import glob
import os
import fnmatch

from Descriptor import Descriptor

# construct an argument parser
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
                help="path to the dataset directory, e.g. C:\\ml\\Monumark\\dataset")
ap.add_argument("-i", "--index", required=True,
                help="path to the output index file, e.g. under C:\\ml\\Monumark")
args = vars(ap.parse_args())

# initialise the color descriptor
cd = Descriptor((8, 12, 3))
output = open(args["index"], "w")

# for each image, calculate the features and write them to the index file
for imagePath in glob.glob(args["dataset"] + "/*.png"):
    imageID = imagePath[imagePath.rfind("/") + 1:]
    print(imageID)
    image = cv2.imread(imagePath)
    features = cd.describe(image)
    features = [str(f) for f in features]
    output.write("%s,%s\n" % (imageID, ",".join(features)))

output.close()
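A hypothetical invocation of the indexing script above (the script name index.py and the index file name are assumptions, not given in the source):

python index.py --dataset C:\ml\Monumark\dataset --index C:\ml\Monumark\index.csv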
def _createTraitDescriptor(self):
    from Descriptor import Descriptor
    return Descriptor()
from Descriptor import Descriptor
from Searcher import Searcher
import argparse
import cv2
import re

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--index", required=True,
                help="path to the index file, e.g. under C:\\ml\\Monumark")
ap.add_argument("-q", "--query", required=True,
                help="path to the query image, e.g. under C:\\ml\\Monumark\\queries")
ap.add_argument("-r", "--result_path", required=True,
                help="path to the result directory, e.g. C:\\ml\\Monumark\\Final_Results")
args = vars(ap.parse_args())

# describe the query image and search the index for the closest matches
cd = Descriptor((8, 12, 3))
query = cv2.imread(args["query"])
features = cd.describe(query)
results = Searcher(args["index"])
final = results.search(features)

cv2.imshow("Query", query)
for (score, resultID) in final:
    resultID = resultID.split("\\")[1]
    print(score, resultID)
    result = cv2.imread(args["result_path"] + "/" + resultID)
    cv2.imshow("Result", result)
    cv2.waitKey(0)  # wait for a key press before showing the next result
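A matching hypothetical invocation of the search script (the script name search.py and the file names are assumptions):

python search.py --index C:\ml\Monumark\index.csv --query C:\ml\Monumark\queries\query.png --result_path C:\ml\Monumark\Final_Results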
self.nleaves = nleaves
[tr, A] = vl.vl_hikmeans(data, self.K, self.nleaves, verb=1)
tr.save(os.path.join(file_dir, file_name))
# save A in signature
# try:
#     with open(os.path.join('./Signature/', 'sign_1000'), 'wb') as file_data:
#         np.save(file_data, A)
# except IOError as ioerror:
#     print ioerror


if __name__ == "__main__":
    desc = Descriptor()
    data = np.empty(shape=(128, 180 * 1000))
    k = 0
    for i in range(0, 180):
        desc.load_desc('./Descriptor/', 'desc_' + str(i))
        data[:, k:k + 1000] = desc.desc
        print 'desc_' + str(i)
        k += 1000
    tree = Tree()
    tree.generate_tree(data, 10, 10000, './', 'tree.vlhkm')
    # tr = vl._vlfeat.VlHIKMTree(0, 0)
    # tr.load('./tree.vlhkm')
    # At = vl.vl_hikmeanspush(tr, data[:, 0:1000])
def Train_Database_Sign(self, database, version, num_of_kpts,
                        force_update=False,
                        K=10,          # tree branching factor
                        depth=4,       # tree depth
                        nleaves=10000  # leaves in the tree
                        ):
    # load information from the database
    num_in_set = database[0]
    num_of_sets = database[1]
    data_dir = database[2]
    total = num_in_set * num_of_sets
    # turned True once one of the steps has been processed, so that the
    # following steps are forced to process as well
    updated = force_update

    # ---- generate descriptors (the directory should be saved) ----
    desc_dir = os.path.join(self.DESC_DIR, 'db_version_' + str(version), str(num_of_kpts))
    if updated or (not os.path.isdir(desc_dir) or os.listdir(desc_dir) == []):
        updated = True
        if not os.path.isdir(os.path.join(self.DESC_DIR, 'db_version_' + str(version))):
            os.mkdir(os.path.join(self.DESC_DIR, 'db_version_' + str(version)))
        if not os.path.isdir(desc_dir):
            os.mkdir(desc_dir)
        for d in data_dir:
            img_idx = d[0]
            class_idx = d[1]
            img_dir = d[2]
            # k is the descriptor index included in the file name, ordered by class
            k = img_idx + class_idx * num_in_set
            # load the image and convert it to a float numpy array
            img = Image.open(img_dir).convert('L')
            img_s = self.StandalizeImage(img, 480)
            img_data = np.asarray(img_s, dtype=float)
            kp = Keypoint()
            kp.generate_keypoint(num_of_kpts, img.size[0], img.size[1], self.SIGMA)  # sigma is currently 1
            # kp.save_keypoint(self.KEYPOINT_DIR, self.KEYPOINT_FILE + str(num_of_kpts))
            print 'Random keypoint generated'
            # generate the descriptor
            desc = Descriptor()
            desc.generate_desc(img_data, kp.kpt)
            desc.save_desc(desc_dir, self.DESC_FILE + str(k))
            print desc.desc
            print '=>' + str(k),
        print 'Descriptor Generated'

    # load the descriptors into one large matrix, to be used to train the tree
    desc_database = np.empty(shape=(128, total * num_of_kpts), dtype=np.uint8)
    for k in range(0, total):
        desc = Descriptor()
        desc.load_desc(desc_dir, self.DESC_FILE + str(k))
        desc_database[:, k * num_of_kpts:(k + 1) * num_of_kpts] = desc.desc
    print 'Descriptor Loaded'

    # ---- build the tree ----
    tree_dir = os.path.join(self.TREE_DIR, 'db_version_' + str(version))
    if updated or (not os.path.isfile(os.path.join(tree_dir, str(num_of_kpts) + self.TREE_FILE))):
        updated = True
        if not os.path.isdir(tree_dir):
            os.mkdir(tree_dir)
        tree = Tree()
        tree.generate_tree(desc_database, K, nleaves, tree_dir,
                           str(num_of_kpts) + self.TREE_FILE)
        print 'Tree built'

    # ---- generate the signature ----
    sign_dir = os.path.join(self.SIGN_DIR, 'db_version_' + str(version))
    if updated or (not os.path.isfile(os.path.join(sign_dir, self.SIGN_FILE + str(num_of_kpts)))):
        updated = True
        tr = vl._vlfeat.VlHIKMTree(0, 0)
        tr.load(os.path.join(tree_dir, str(num_of_kpts) + self.TREE_FILE))
        print 'Tree Loaded'
        sign = Signature()
        sign.generate_sign_database_dir(tr, desc_database, K, depth, total, num_of_kpts)
        if not os.path.isdir(sign_dir):
            os.mkdir(sign_dir)
        sign.save_sign(sign_dir, self.SIGN_FILE + str(num_of_kpts))
        print 'Signature Generated'
    else:
        print 'Signature Already Generated'

    del desc_database
    return updated
def Classifier(self, database, version, num_of_kpts, cutoff,
               top=5, K=10, depth=4):
    num_in_set = database[0]
    num_of_sets = database[1]
    test_database = database[3]
    total = num_in_set * num_of_sets
    num_in_test_set = len(test_database) / num_of_sets
    classify_score = np.zeros(num_of_sets)
    class_name = [[] for i in range(num_of_sets)]

    # load the weights and the weighted signature database
    wt = Weight(cutoff)
    sign_dir = os.path.join(self.SIGN_DIR, 'db_version_' + str(version))
    wt.load_weights(sign_dir, self.WEIGHT_FILE + str(num_of_kpts) + '_' + str(cutoff))
    wt.load_weighted_sign(sign_dir, self.WEIGHT_SIGN_FILE + str(num_of_kpts) + '_' + str(cutoff))

    # load the tree
    tree_dir = os.path.join(self.TREE_DIR, 'db_version_' + str(version))
    tr = vl._vlfeat.VlHIKMTree(0, 0)
    tr.load(os.path.join(tree_dir, str(num_of_kpts) + self.TREE_FILE))

    for k in test_database:
        # load a test image from the image directory
        img = Image.open(k[2]).convert('L')
        img = self.StandalizeImage(img, 480)
        img_data = np.asarray(img, dtype=float)
        # generate the descriptor, signature and weighted signature
        kp = Keypoint()
        # kp.load_keypoint(self.KEYPOINT_DIR, self.KEYPOINT_FILE + str(num_of_kpts))
        kp.generate_keypoint(num_of_kpts, img.size[0], img.size[1], self.SIGMA)
        desc = Descriptor()
        desc.generate_desc(img_data, kp.kpt)
        # important: convert desc to float type if needed
        # desc_f = np.array(desc.desc, dtype=float)
        sign = Signature()
        s = sign.generate_sign(tr, desc.desc, K, depth)
        weighted_sign = wt.weight_sign(s)
        # vote: the top closest database signatures each vote for their class
        d = np.empty(total)
        for i in range(0, total):
            d[i] = self.dist(wt.weighted_sign[i, :], weighted_sign)
        perm = np.argsort(d)
        vote_for = np.floor(perm[0:top] / num_in_set) + 1
        votes = vl.vl_binsum(np.zeros(num_of_sets), np.ones(top), vote_for)
        best = np.argmax(votes)
        if best == k[1]:
            classify_score[k[1]] += 1
        print '=>' + str(k[0])
        class_name[k[1]] = k[3]

    classify_score = classify_score / num_in_test_set
    return zip(class_name, classify_score.tolist())
import os.path
from PIL import Image, ImageOps
import numpy as np
from Descriptor import Descriptor
from Keypoint import Keypoint

if __name__ == '__main__':
    # load a test image and scale it to a height of 480 pixels
    img = Image.open("./Image_large/ibis/image_0002.jpg").convert('L')
    [width, height] = img.size
    h = 480
    ratio = float(height) / h
    w = int(width / ratio)
    img = ImageOps.fit(img, [w, h], Image.ANTIALIAS)
    img.save('./Image/test.png')
    print img
    img_data = np.asarray(img, dtype=float)
    kpt = Keypoint()
    kpt.generate_keypoint(1000, img.size[0], img.size[1], 1)
    desc = Descriptor()
    desc.generate_desc(img_data, kpt.kpt)
    print desc.desc
    print str(w)
def __init__(self, paths, jobSerialNumber, logger):
    Descriptor.__init__(self, paths, jobSerialNumber, logger, {})
    self.descs = []
    self.jobGroup = 'ContainerGroup'  # jobGroup is used by MinderFactory
    self.name = 'Container'
    self.identifiedName = self.name + str(self.jobSerialNumber)
def __init__(self, paths, logger, jobSerialNumber, argDict={}):
    Descriptor.__init__(self, paths, logger, jobSerialNumber, argDict)
    self.descriptor = None
    self.outChainFiles = []  # filenames (possibly with subdirectories)
    self.name = 'ChainJob'
    self.jobGroup = 'ContainerGroup'  # jobGroup is used by MinderFactory
def _createDescriptor(self, value, locator):
    from Descriptor import Descriptor
    return Descriptor(value, locator)
async def parse(command, channel):
    global gamedictionary
    global moviedictionary
    listofargs = command.split(" ", 2)
    if len(listofargs) < 3:
        return "Command, movie/game or name missing"
    listofargs[0] = listofargs[0].lower()
    listofargs[1] = listofargs[1].lower()
    if "game" in listofargs[0:2]:
        media = gdc
        mediadict = gamedictionary
    elif "movie" in listofargs[0:2]:
        media = mdc
        mediadict = moviedictionary
    else:
        return "Didn't clarify whether it's a movie or a game"
    remainder = listofargs[2].split("\n")
    name = remainder[0]
    if "add" in listofargs[0:2]:
        descriptor = Descriptor()
        mediadict[name] = descriptor
        return await add(descriptor, media, remainder)
    else:
        twonames = name.split(" -")
        name = await getcorrectname(twonames[0], mediadict)
        if not name:
            return "Name not found"
        elif "modify" in listofargs[0:2] or "update" in listofargs[0:2]:
            if len(twonames) < 2:
                return ("Command is supposed to be: gti modify game/movie name "
                        "-d/i new value (you're missing a -d/i)")
            descriptor = mediadict[name]
            return await modify(descriptor, media, remainder)
        elif "remove" in listofargs[0:2] or "delete" in listofargs[0:2]:
            if len(twonames) < 2:
                del mediadict[name]
            else:
                emoji = twonames[1].strip()
                if emoji not in mediadict[name].distributions:
                    return "Emoji not there"
                else:
                    del mediadict[name].distributions[emoji]
            await save(media)
        elif "send" in listofargs[0:2] or "dada" in listofargs[0:2]:
            descriptor = mediadict[name]
            if len(twonames) < 2:
                await descriptor.showcasemessage(name, channel)
            else:
                emoji = twonames[1].strip()
                embed = await descriptor.assembleembed(name, emoji)
                if not embed:
                    return "Emoji not there"
                await channel.send(embed=embed)
        elif "rename" in listofargs[0:2]:
            if len(twonames) < 2:
                return ("Command is supposed to be: gti rename game/movie "
                        "oldname - newname (you're missing the - separator)")
            newname = twonames[1].strip()
            descriptor = mediadict.pop(name)
            mediadict[newname] = descriptor
            await save(media)
        elif "append" in listofargs[0:2]:
            descriptor = mediadict[name]
            await descriptor.appendfrom(remainder, 1)
            await save(media)
        else:
            return "Command doesn't exist"
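For reference, a few command strings this parser accepts, inferred from the error messages above (the leading "gti" prefix appears to be stripped before parse() is called, and the media names here are made up):

add game Hades
modify movie Alien -d A new description
rename game Oldname - Newname
send movie Alien
remove game Hades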