def ask_total_reward(self):
    if len(self.trijectories) < 2:
        return
    # Pick two distinct trajectories at random.
    r = np.asarray(range(len(self.trijectories)))
    np.random.shuffle(r)
    t = [self.trijectories[r[0]], self.trijectories[r[1]]]
    if t[0][2] > t[1][2]:
        preference = 0
    elif t[0][2] < t[1][2]:
        preference = 1
    else:
        preference = 2  # equal total reward
    if preference != -1:
        # Generate observation list
        os = []
        for i in range(len(t)):
            env_name, seed, total_reward, trijectory = t[i]
            o = []
            for j in range(len(trijectory)):
                o.append(trijectory[j][1])
            os.append(o)
        self.add_preference(os[0], os[1], preference)
def triangulate(self, track, reproj_threshold, min_ray_angle_degrees, return_reason=False):
    """Triangulate a track and add point to reconstruction."""
    os, bs = [], []
    for shot_id in self.graph[track]:
        # Only triangulate shots already in the reconstruction; this does not add new images.
        if shot_id in self.reconstruction.shots:
            # One track, and the subset of the images in the reconstruction right now.
            shot = self.reconstruction.shots[shot_id]
            os.append(self._shot_origin(shot))
            x = self.graph[track][shot_id]['feature']
            b = shot.camera.pixel_bearing(np.array(x))
            r = self._shot_rotation_inverse(shot)
            bs.append(r.dot(b))
    if len(os) >= 2:
        # error and triangulated 3D point
        e, X = csfm.triangulate_bearings_midpoint(
            os, bs, reproj_threshold, np.radians(min_ray_angle_degrees))
        if X is not None:
            point = types.Point()
            point.id = track
            point.coordinates = X.tolist()
            self.reconstruction.add_point(point)
    else:
        e = 4
    if return_reason:
        return e
def dumpJson(dirname, episodes, epoch, rank):
    os = []
    for episode in episodes:
        episode['o'] = episode['o'].tolist()
        os.append(episode['o'])
    with open(dirname + '/rollout_{0}_{1}.txt'.format(epoch, rank), 'w') as file:
        file.write(json.dumps(os))
def track_synonyms(self, o):
    os = []
    for k, v in self.synonyms.items():
        if v == o:
            handle = Entrez.efetch(db="taxonomy", id=k, retmode="xml")
            tax_record = Entrez.read(handle)
            o_species = "_".join(
                tax_record[0]["ScientificName"].split()).lower()
            os.append(o_species)
    return os + [o]
def tupleReaders(renv, tys):
    o = 0
    os = []
    rs = []
    for ty in tys:
        o = align(o, alignOf(ty))
        os.append(o)
        rs.append(makeReader(renv, ty))
        o += sizeOf(ty)
    return (os, rs)
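# A minimal sketch of the offset arithmetic in tupleReaders, assuming
# align(o, a) rounds o up to the next multiple of a (align, alignOf,
# sizeOf, and makeReader are the renv helpers this function relies on):
#
#   def align(o, a):
#       return (o + a - 1) // a * a
#
# For three fields sized/aligned like C's (int32: 4/4, int8: 1/1,
# int64: 8/8), the loop yields os == [0, 4, 8], i.e. the usual padded
# struct layout, and leaves o == 16 as the total size.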
def dna(self):
    os = ['> %s\n' % self.filename]
    keys = self.residues.keys()
    keys.sort()
    for chainpos in keys:
        res = self.residues[chainpos]
        if not res.type in nucleic_acids:
            continue
        letter = get_oneletter(res.type)
        os.append(letter)
    os.append('\n')
    return string.join(os, '')
def _addTriple(self, s, p, o):
    if type(o) in [BNode, URIRef]:
        self.graph.add((s, p, o))
    elif type(o) is list:
        o_list = BNode()
        self.graph.add((s, p, o_list))
        os = Collection(self.graph, o_list)
        for item in o:
            os.append(Literal(item))
    elif o != '':
        self.graph.add((s, p, Literal(o)))
def find_parents(self):
    os = []
    if self.parents is None:
        return ["-"]
    for i, eachParent in enumerate(self.parents):
        if i >= Cell.cutoff:
            continue
        strings = eachParent.find_parents()
        for eachString in strings:
            os.append(eachString + self.symbols[i])
    return os
def computegmms(mfccs_train):
    gmms = dict()
    for genre in mfccs_train:
        os = []
        for pool in mfccs_train[genre]:
            # collect mfcc.mean without the DC value
            mfcc_no0 = np.array(pool['lowlevel.mfcc.mean'][1:])
            os.append(mfcc_no0)
        gmms[genre] = mixture.GaussianMixture(n_components=1)
        gmms[genre].fit(os)
    return gmms
def fasta(self):
    os = ['> %s\n' % self.filename]
    # note: self.residues is indexed by instances of ChainPos
    keys = self.residues.keys()
    keys.sort()  # ChainPos knows how to sort itself
    for chainpos in keys:
        res = self.residues[chainpos]
        # skip DNA
        if res.type in nucleic_acids:
            continue
        letter = get_oneletter(res.type)
        os.append(letter)
    os.append('\n')
    return string.join(os, '')
def overseg(v_e_c, edges):
    end_sel = np.where(v_e_c[:, -1] == 1)[0]
    cross_ids = np.where(v_e_c[:, -1] == 2)[0]
    overseg = []
    for s in end_sel:
        os = []
        id_ = v_e_c[s, -2]
        idx0, idx1 = np.where(edges == id_)
        idx0, idx1 = idx0[0], idx1[0]
        idy1 = 0 if idx1 else 1
        os.append(id_)
        while edges[idx0, idy1] not in cross_ids:
            # (to be continued)
            id_ = edges[idx0, idy1]
def load_triplet(path):
    ss = []
    os = []
    ps = []
    with open(path, "r") as f:
        reader = csv.reader(f, delimiter='\t')
        for row in reader:
            s, o, p = row
            ss.append(e_id[s])
            os.append(e_id[o])
            ps.append(r_id[p])
    ss = np.array(ss)
    os = np.array(os)
    ps = np.array(ps)
    print(len(ss))
    return (ss, os, ps)
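# Hedged usage sketch for load_triplet: it assumes module-level lookup
# tables e_id (entity -> index) and r_id (relation -> index), and a
# tab-separated file whose rows unpack as subject, object, predicate:
#
#   alice<TAB>bob<TAB>knows
#
#   e_id = {'alice': 0, 'bob': 1}
#   r_id = {'knows': 0}
#   ss, objs, ps = load_triplet('triples.tsv')  # arrays [0], [1], [0]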
def deletedetails():
    num = input("Enter the phone number of the student that is to be deleted")
    with open('student_info.txt', 'r+', newline='') as f:
        one = csv.reader(f)
        rows = []
        os = []
        for rec in one:
            rows.append(rec)
        for i in range(len(rows)):
            if rows[i][0] != num:
                os.append(rows[i])
        # print(os)
    print("*********Your account has been deleted permanently, SORRY!!!!!!********")
    with open('student_info.txt', 'w', newline='') as f:
        sm = csv.writer(f)
        sm.writerows(os)
def print_het_targs(inp):
    '''
    This prints the targets for the phase 1 proposal
    :param inp: pandas dataframe with name, ra (string), de (string), Vmag
    :return:
    '''
    os = []
    for i in inp.index:
        o1 = []
        o1.append('\\ObjName{{{}}}'.format(inp.loc[i, 'name']).replace('_', ' '))
        o1.append('\\NumberofObjects{1}')
        o1.append('\\ra{{{}}}'.format(inp.loc[i, 'ra_s']))
        o1.append('\\dec{{{}}}'.format(inp.loc[i, 'de_s']))
        o1.append('\\magnitude{{{:2.3}}}'.format(inp.loc[i, 'V']))
        o1.append('\\Filter{V}')
        o1.append('\\AcquisitionMethod{Finder Chart}')
        o2 = '\n'.join(o1)
        os.append(o2)
    return '\n\n'.join(os)
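# Hedged usage sketch for print_het_targs, using the column names the
# function actually reads ('name', 'ra_s', 'de_s', 'V'); the values are
# made up for illustration:
#
#   import pandas as pd
#   inp = pd.DataFrame({'name': ['HD_1234'],
#                       'ra_s': ['01:02:03.4'],
#                       'de_s': ['+05:06:07.8'],
#                       'V': [9.87]})
#   print(print_het_targs(inp))  # one \ObjName{...} target block per row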
def preprocess(data, idx, image):
    print("Sorting out data")
    X = [data['states'][i] for i in idx]
    A = [data['actions'][i] for i in idx]
    if image != False:
        images = [data['images'][i] for i in idx]
        _, wid, hei, chan = np.shape(images[0])
        X = [0] * len(images)
        for i in range(len(images)):
            # flatten images and scale [0, 1]
            X[i] = [
                images[i][j].reshape((wid * hei * chan))
                for j in range(len(images[i]))
            ]
        del images
    # mild supervision
    all_labels = []
    tasks = [data['tasks'][i] for i in idx]
    ons = [data['gt_onsets'][i] for i in idx]
    for i in idx:
        all_labels.extend(data['tasks'][i])
    unique = np.sort(np.unique(all_labels))
    # re-cast subtask names. Y is the mild supervision per trajectory.
    Y = []
    onsets = []
    for en, task in enumerate(tasks):
        su_tasks = []
        os = []
        for en2, subtask in enumerate(task):
            su_tasks.append(np.where(subtask == unique)[0][0])
        for en2, subtask in enumerate(ons[en]):
            os.append(np.where(subtask == unique)[0][0])
        Y.append(su_tasks)
        onsets.append(os)
    return X, A, Y, onsets, unique
def load_hierarchies_hai(datafile, kmin, kmax, skip):
    n_hierarchies = (kmax - kmin) / skip
    hs = []
    os = []
    ss = []
    rr = []
    for k in range(kmin, kmax, skip):
        print basedir
        hfile = basedir + '/visualization/' + str(k) + 'RNG_' + datafile + '.hierarchy'
        # h, o, s = hierarchy.load(hfile)
        h, o, s, r = hierarchy.load(hfile)
        hs.append(h)
        os.append(o)
        ss.append(s)
        rr.append(r)
    return hs, np.array(os), ss, np.arange(kmin, kmax, skip), rr
def update_ip(self, name, ip):
    conn = self.conn
    vm = conn.lookupByName(name)
    if not vm:
        print("VM %s not found" % name)
        return
    xml = vm.XMLDesc(0)
    root = ET.fromstring(xml)
    if vm.isActive() == 1:
        print("Machine up. Change will only appear upon next reboot")
    os = root.getiterator('os')[0]
    smbios = os.find('smbios')
    if smbios is None:
        newsmbios = ET.Element("smbios", mode="sysinfo")
        os.append(newsmbios)
    sysinfo = root.getiterator('sysinfo')
    system = root.getiterator('system')
    if not sysinfo:
        sysinfo = ET.Element("sysinfo", type="smbios")
        root.append(sysinfo)
    sysinfo = root.getiterator('sysinfo')[0]
    if not system:
        system = ET.Element("system")
        sysinfo.append(system)
    system = root.getiterator('system')[0]
    versionfound = False
    for entry in root.getiterator('entry'):
        attributes = entry.attrib
        if attributes['name'] == 'version':
            entry.text = ip
            versionfound = True
    if not versionfound:
        version = ET.Element("entry", name="version")
        version.text = ip
        system.append(version)
    newxml = ET.tostring(root)
    conn.defineXML(newxml)
def get_batch_images(batch_size, image_width, image_height, data_path):
    """
    :param batch_size: batch size
    :param image_width: desired image width
    :param image_height: desired image height
    :param data_path:
    :return: image matrices, file names
    """
    images = []
    labels = []
    os = []
    all_names = get_type_image_names(data_path)
    index = 0
    while index < batch_size:
        name = all_names[index]
        image, label, o = read_a_image_by_name(name, image_width, image_height, data_path)
        images.append(image)
        labels.append(label)
        os.append(o)
        index += 1
    return images, labels, os
def build_model(self):
    videos = tf.placeholder(tf.float32, [self.batch_size, self.steps, self.img_dim])
    x_ = tf.reshape(videos, [-1, self.img_dim])  # sample * steps * dim -> (sample * steps) * dim
    x_ = tf.nn.xw_plus_b(x_, self.W_iemb, self.b_iemb)
    img_input = tf.reshape(x_, [self.batch_size, self.steps, self.hidden])

    hiddens = []
    hidden = state = self.init_state
    with tf.variable_scope("RNN", reuse=None):
        for i in range(self.steps):
            if i > 0:
                tf.get_variable_scope().reuse_variables()
            (hidden, state) = self.my_rnn(img_input[:, i, :], state)
            hiddens.append(hidden)
    hiddens = tf.pack(hiddens)
    hiddens = tf.squeeze(hiddens)
    hiddens = tf.transpose(hiddens, perm=[1, 0, 2])
    videos_ = hiddens

    u = tf.constant(0.1, shape=[self.batch_size, self.mem_dim], name="u")
    y = tf.placeholder(tf.float32, [None, self.n_y], name="y")

    temp_videos_A = tf.matmul(tf.reshape(videos_, [-1, self.hidden]), self.A)
    temp_videos_A = tf.reshape(temp_videos_A, [-1, self.mem_size, self.mem_dim])
    temp_videos_A = tf.transpose(temp_videos_A, perm=[0, 2, 1])  # + self.T_A  # sample * mem_dim * mem_size
    videos_input_m = tf.transpose(temp_videos_A, perm=[0, 2, 1])  # sample * mem_size * mem_dim

    temp_videos_C = tf.matmul(tf.reshape(videos_, [-1, self.hidden]), self.C)
    temp_videos_C = tf.reshape(temp_videos_C, [-1, self.mem_size, self.mem_dim])
    temp_videos_C = tf.transpose(temp_videos_C, perm=[0, 2, 1])  # + self.T_C  # sample * mem_dim * mem_size
    videos_output_m = tf.transpose(temp_videos_C, perm=[0, 2, 1])  # sample * mem_size * mem_dim

    os = []
    for _ in range(self.n_hop):
        u = tf.expand_dims(u, -1)  # sample * mem_dim -> sample * mem_dim * 1
        in_m = tf.batch_matmul(videos_input_m, u)
        in_m = tf.squeeze(in_m)
        in_probs = tf.nn.softmax(in_m)  # sample * mem_size
        out_m = tf.mul(videos_output_m,
                       tf.tile(tf.expand_dims(in_probs, 2), [1, 1, self.mem_dim]))
        o = tf.reduce_sum(out_m, 1)  # sample * mem_dim
        u = tf.add(tf.matmul(tf.squeeze(u), self.W_u), o)
        if self.nl:
            u = tf.nn.elu(u)
        os.append(u)

    train_hidden = valid_hidden = os[-1]
    train_hidden = tf.nn.dropout(train_hidden, self.do_prob)
    y_hat = tf.nn.xw_plus_b(train_hidden, self.W_o, self.b_o)
    pred = tf.argmax(tf.nn.softmax(y_hat), 1)
    v_y_hat = tf.nn.xw_plus_b(valid_hidden, self.W_o, self.b_o)
    v_pred = tf.argmax(tf.nn.softmax(v_y_hat), 1)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(pred, tf.argmax(y, 1)), tf.float32))

    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(y_hat, y)
    loss = tf.reduce_mean(cross_entropy)
    regularizers = (tf.nn.l2_loss(self.A) + tf.nn.l2_loss(self.C) +
                    # tf.nn.l2_loss(self.T_A) + tf.nn.l2_loss(self.T_C) +
                    tf.nn.l2_loss(self.W_iemb) + tf.nn.l2_loss(self.b_iemb) +
                    tf.nn.l2_loss(self.W_u) +
                    tf.nn.l2_loss(self.W_o) + tf.nn.l2_loss(self.b_o))
    loss += 5e-3 * regularizers
    lr = tf.train.exponential_decay(
        self.learning_rate,  # Base learning rate.
        self.global_step,    # Current index
        200,                 # Decay step.
        0.96,                # Decay rate.
        staircase=True)
    train_op = tf.train.AdamOptimizer(lr).minimize(loss, global_step=self.global_step)
    return videos, u, y, loss, train_op, lr, pred, v_pred, accuracy
HERE = os.path.abspath(os.path.dirname(__file__))
PROJ_DIR = os.path.abspath(os.path.join(HERE, '../'))
SITE_ROOT = os.path.abspath(os.path.join(PROJ_DIR, '../'))
env_ = os.environ.get('KOMOO_ENV', 'dev')

sys.path.append(PROJ_DIR)
sys.path.append(SITE_ROOT)

from django.core.management import setup_environ

env_name = ['', 'development', 'staging', 'production'][
    3 * int(env_ == 'prod') +
    2 * int(env_ == 'stage') +
    int(env_ == 'dev')]

environ = None
exec 'from settings import {} as environ'.format(env_name)
setup_environ(environ)

# ======= script =======
from organization.models import Organization

os = []
for o in Organization.objects.all():
    if Organization.objects.filter(slug=o.slug).count() > 1:
        os.append(o)

for o in os:
    o.name = o.name + ' - ' + o.community.all()[0].name
    o.save()
filein1 = open(str(inputfile))
for line in filein1:
    n_freq += 1
filein1.close()

filein1 = open(str(inputfile), 'r')
spec_all = filein1.read()
filein1.close()
spec_text = spec_all.split('\n')
for i_freq in range(n_freq - 1):
    freq.append(float(spec_text[i_freq + 1].split()[0]))
    os.append(float(spec_text[i_freq + 1].split()[1]))

for i_e in range(ne):
    emission_energy[i_e] = e_min + float(i_e) * de
    emission_intensity[i_e] = 0.0
    for i_freq in range(n_freq - 1):
        lineshape = 1 / 3.1415926 * e_sigma / ((emission_energy[i_e] - freq[i_freq])**2 + e_sigma**2)
        tmp_os = os[i_freq] * lineshape
        emission_intensity[i_e] = emission_intensity[i_e] + tmp_os

filein4 = open(str(outputfile), 'w')
filein4.write('# Energy(eV)  Intensity \n')
for i_e in range(ne):
print "-----------------" ctx.stroke() surface.write_to_png("test.png") sys.exit() surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, int(width), int(height)) ctx = cairo.Context (surface) a1 = 1 a2 = 1 os = [] os.append(f.minimum[0] * -1) os.append(f.minimum[1] * -1) os.append(f.minimum[2] * -1) for i in range(f.header['edges']['num']): v1 = f.vertices[f.edges[i][0]] v2 = f.vertices[f.edges[i][1]] ctx.move_to(os[0] + v1[0], os[1] + v1[1]) ctx.line_to(os[0] + v2[0], os[1] + v2[1]) a1 = (v1[2] + f.minimum[2])/scale a2 = (v2[2] + f.minimum[2])/scale linear = cairo.LinearGradient(os[0] + v1[0], os[1] + v1[1], os[0] + v2[0], os[1] + v2[1]); linear.add_color_stop_rgb(0, a1, a1, a1) linear.add_color_stop_rgb(1, a2, a2, a2)
def pohiprogramm(sona):
    nim = []
    om = []
    os = []
    sis = []
    ses = []
    sst = []
    lle = []
    lal = []
    llt = []
    sav = []
    raj = []
    olv = []
    ilm = []
    kaa = []
    tyybid = []
    sona = sona.lower()
    mitmussd = []
    htmlsone = sonekontroll(sona)
    link = "https://www.eki.ee/dict/qs/index.cgi?Q=" + htmlsone + "&F=M"
    tugevadsulghaalikud = ["k", "p", "t"]
    norgadsulghaalikud = ["g", "b", "d"]
    taishaalikud = ["a", "e", "i", "o", "u", "õ", "ä", "ö", "ü"]
    kaashaalikud = ["h", "j", "l", "m", "n", "r", "s", "h", "f", "š", "z", "ž",
                    "k", "p", "t", "g", "b", "d"]
    tegusonad = ["27", "28", "29", "30", "31", "32", "33", "34", "35", "36",
                 "37", "37i", "38", "38i"]
    # dictionary for all of the cases
    käändedAinsuses = {"nimetav": sona, "omastav": "", "osastav": "",
                       "sisseütlev": "", "seesütlev": "", "seestütlev": "",
                       "alaleütlev": "", "alalütlev": "", "alaltütlev": "",
                       "saav": "", "rajav": "", "olev": "", "ilmaütlev": "",
                       "kaasaütlev": ""}
    käändedList = list(käändedAinsuses.keys())
    # case endings, starting from the illative
    käändeLõpud = ["sse", "s", "st", "le", "l", "lt", "ks", "ni", "na", "ta", "ga"]
    # main program
    html = urlopen(link)
    soup = BeautifulSoup(html, "lxml")
    teg = False
    lõpptäht = ""
    mitusõna = 0
    JärgLeitud = 0
    spanstr = ""
    ad = soup.find_all("p", {"class": "inf"})
    # print(puhastaHtmlTag(ad[0]), "\n")
    span = soup.find_all("span")
    liitsonaproov = soup.find_all("span", {"class": "m leitud_id"})
    leitudsonaproov = soup.find("span", {"class": "m"})
    # print(liitsonaproov)
    # print(leitudsonaproov)
    # print(span)
    for j in range(0, len(span)):
        osa = span[j]
        for content in osa:
            # collect every span-tagged line from the page source into one string
            spanstr += str(content)
        # print(spanstr)
        # if it is not the same word, e.g. külmkapp
        if "ÕS" in spanstr:
            JärgLeitud = 3
        elif JärgLeitud == 3:
            if puhastaHtmlTag(puhasta(puhasta(spanstr, "`"), "'")) == puhasta(sona, "`"):
                JärgLeitud = 0
            else:
                # print("Not the same word, but one with the same meaning, or there is a typo")
                # print(spanstr)
                sona = puhastaHtmlTag(puhasta(puhasta(spanstr, "`"), "'"))
                omastaVäärtus(käändedAinsuses, "nimetav", sona)
                JärgLeitud = 0
        elif "javascript" in spanstr and '"mt"' in spanstr:
            print(spanstr)
            muutujaKusOnKoikVajalikInfoAgaMaOlinLiigaLollEtMärgata = puhasta(puhasta(puhastaHtmlTag(spanstr), "'"), "`")
            pool = muutujaKusOnKoikVajalikInfoAgaMaOlinLiigaLollEtMärgata.split(":")
            tyypnumber = pool[0]
            tyypnumber = puhasta(puhasta(puhasta(puhasta(tyypnumber, "("), ")"), "´"), " ").split("ja")
            kaanded = puhasta(muutujaKusOnKoikVajalikInfoAgaMaOlinLiigaLollEtMärgata, "´").split(":")[-1]
            kaanded = str(kaanded).split(";")
            kaandedainsus = kaanded[0].split(",")
            if len(kaanded) >= 2:
                mitmuss = kaanded[1].split(",")
            else:
                mitmuss = []
            # separate out the verbs
            for r in tyypnumber:
                if r in tegusonad:
                    teg = True
                    # print("A verb, I will not decline it")
                    return "Tegusõna"
            while teg == False:
                for s in range(len(kaandedainsus)):
                    if kaandedainsus[s][0] == " ":
                        kaandedainsus[s] = kaandedainsus[s][1:]
                if kaanded == tyypnumber:  # no case forms
                    käändedAinsuses["omastav"] = sona
                elif len(kaandedainsus) == 1 and "-" not in kaandedainsus[0]:
                    käändedAinsuses = omastaVäärtus(käändedAinsuses, "omastav", kaandedainsus[0])
                    for u in range(0, 11):
                        käändedAinsuses = omastaVäärtus(käändedAinsuses, käändedList[u + 3],
                                                        kaandedainsus[0] + käändeLõpud[u])
                elif len(kaandedainsus) == 2 and "-" not in kaandedainsus[0]:
                    käändedAinsuses = omastaVäärtus(käändedAinsuses, "omastav", kaandedainsus[0])
                    käändedAinsuses = omastaVäärtus(käändedAinsuses, "osastav", kaandedainsus[1])
                    for u in range(0, 11):
                        käändedAinsuses = omastaVäärtus(käändedAinsuses, käändedList[u + 3],
                                                        kaandedainsus[0] + käändeLõpud[u])
                elif len(kaandedainsus) == 3 and "-" not in kaandedainsus[0]:
                    käändedAinsuses = omastaVäärtus(käändedAinsuses, "omastav", kaandedainsus[0])
                    käändedAinsuses = omastaVäärtus(käändedAinsuses, "osastav", kaandedainsus[1])
                    käändedAinsuses = omastaVäärtus(käändedAinsuses, "sisseütlev", kaandedainsus[2])
                    for u in range(1, 11):
                        käändedAinsuses = omastaVäärtus(käändedAinsuses, käändedList[u + 3],
                                                        kaandedainsus[0] + käändeLõpud[u])
                elif kaandedainsus[0][0] == "-":
                    if len(kaandedainsus) == 1:
                        if len(puhasta(kaandedainsus[0], "-")) <= 2:
                            käändedAinsuses = omastaVäärtus(käändedAinsuses, "omastav",
                                                            sona + puhasta(kaandedainsus[0], "-"))
                        else:
                            var = silbita(sona)
                            del var[-1]
                            var = str("".join(var))
                            käändedAinsuses = omastaVäärtus(käändedAinsuses, "omastav",
                                                            var + puhasta(kaandedainsus[0], "-"))
                    elif len(kaandedainsus) == 2:
                        if len(puhasta(kaandedainsus[0], "-")) <= 2 and len(puhasta(kaandedainsus[1], "-")) <= 2:
                            käändedAinsuses = omastaVäärtus(käändedAinsuses, "omastav",
                                                            sona + puhasta(kaandedainsus[0], "-"))
                            käändedAinsuses = omastaVäärtus(käändedAinsuses, "osastav",
                                                            sona + puhasta(kaandedainsus[1], "-"))
                        else:
                            var = silbita(sona)
                            del var[-1]
                            var = str("".join(var))
                            käändedAinsuses = omastaVäärtus(käändedAinsuses, "omastav",
                                                            var + puhasta(kaandedainsus[0], "-"))
                            var = silbita(sona)
                            del var[-1]
                            var = str("".join(var))
                            # print(var)
                            käändedAinsuses = omastaVäärtus(käändedAinsuses, "osastav",
                                                            var + puhasta(kaandedainsus[1], "-"))
                for j in range(3, 14):
                    if käändedAinsuses[käändedList[j]] == "":
                        käändedAinsuses[käändedList[j]] = str(käändedAinsuses["omastav"] + käändeLõpud[j - 3])
                JärgLeitud = 0
                käändedAinsuses = KoledadIfid(sona, tyypnumber, käändedAinsuses, kaandedainsus)
                print(tyypnumber, kaandedainsus)
                # printDict(käändedAinsuses)
                # mitmus(käändedAinsuses, tyypnumber, mitmuss)
                # return käändedAinsuses, puhastaHtmlTag(ad[0]), tyypnumber, mitmuss  # IDEA: as a dictionary list for several, e.g. "viis"
                # print("\n")
                nim.append(käändedAinsuses["nimetav"])
                om.append(käändedAinsuses["omastav"])
                os.append(käändedAinsuses["osastav"])
                sis.append(käändedAinsuses["sisseütlev"])
                ses.append(käändedAinsuses["seesütlev"])
                sst.append(käändedAinsuses["seestütlev"])
                lle.append(käändedAinsuses["alaleütlev"])
                lal.append(käändedAinsuses["alalütlev"])
                llt.append(käändedAinsuses["alaltütlev"])
                sav.append(käändedAinsuses["saav"])
                raj.append(käändedAinsuses["rajav"])
                olv.append(käändedAinsuses["olev"])
                ilm.append(käändedAinsuses["ilmaütlev"])
                kaa.append(käändedAinsuses["kaasaütlev"])
                tyybid.append(tyypnumber)
                # print(mitmuss)
                if mitmuss == []:
                    mitmussd.append([])
                else:
                    mitmussd.append(mitmuss)
                # print(tyybid, kaandedainsus)
                # print(nim, om, os, sis, ses, sst, lle, lal, llt, sav, raj, olv, ilm, kaa)
                clearDictionary(käändedAinsuses, sona)
                mitmuss = []
                teg = False
                break
        spanstr = ''
        puhas = ""
    # print("\n", mitmussd)
    return [nim, om, os, sis, ses, sst, lle, lal, llt, sav, raj, olv, ilm, kaa], \
        puhastaHtmlTag(ad[0]), tyybid, mitmussd
def sample_relational_demo(size=30):
    """Sample demo data with the indicated number of rows in the parent table."""
    # Users
    faker = Faker()
    countries = [faker.country_code() for _ in range(5)]
    country = np.random.choice(countries, size=size)
    gender = np.random.choice(['F', 'M', None], p=[0.5, 0.4, 0.1], size=size)
    age = (sp.stats.truncnorm.rvs(-1.2, 1.5, loc=30, scale=10, size=size).astype(int)
           + 3 * (gender == 'M')
           + 3 * (country == countries[0]).astype(int))
    num_sessions = (sp.stats.gamma.rvs(1, loc=0, scale=2, size=size)
                    * (0.8 + 0.2 * (gender == 'F'))).round().astype(int)
    users = pd.DataFrame({
        'country': country,
        'gender': gender,
        'age': age,
        'num_sessions': num_sessions
    })
    users.index.name = 'user_id'

    # Sessions
    sessions = pd.DataFrame()
    for user_id, user in users.iterrows():
        device_weights = [0.1, 0.4, 0.5] if user.gender == 'M' else [0.3, 0.4, 0.3]
        devices = np.random.choice(['mobile', 'tablet', 'pc'],
                                   size=user.num_sessions, p=device_weights)
        os = []
        pc_weights = [0.6, 0.3, 0.1] if user.age > 30 else [0.2, 0.4, 0.4]
        pc_os = np.random.choice(['windows', 'macos', 'linux'], p=pc_weights)
        phone_weights = [0.7, 0.3] if user.age > 30 else [0.9, 0.1]
        phone_os = np.random.choice(['android', 'ios'], p=phone_weights)
        for device in devices:
            os.append(pc_os if device == 'pc' else phone_os)
        minutes = (sp.stats.truncnorm.rvs(-3, 3, loc=30, scale=10, size=user.num_sessions)
                   * (1 + 0.1 * (user.gender == 'M'))
                   * (1 + user.age / 100)
                   * (1 + 0.1 * (devices == 'pc')))
        num_transactions = (minutes / 10) * (0.5 + (user.gender == 'F'))
        sessions = sessions.append(pd.DataFrame({
            'user_id': np.full(user.num_sessions, int(user_id)),
            'device': devices,
            'os': os,
            'minutes': minutes.round().astype(int),
            'num_transactions': num_transactions.round().astype(int),
        }), ignore_index=True)
    sessions.index.name = 'session_id'
    del users['num_sessions']

    # Transactions
    transactions = pd.DataFrame()
    for session_id, session in sessions.iterrows():
        size = session.num_transactions
        if size:
            amount_base = sp.stats.truncnorm.rvs(-2, 4, loc=100, scale=50, size=size)
            is_apple = session['os'] in ('ios', 'macos')
            amount_modif = np.random.random(size) * 100 * is_apple
            amount = amount_base / np.random.randint(1, size + 1) + amount_modif
            seconds = np.random.randint(3600 * 24 * 365)
            start = datetime(2019, 1, 1) + timedelta(seconds=seconds)
            timestamp = sorted([
                start + timedelta(seconds=int(seconds))
                for seconds in np.random.randint(60 * session.minutes, size=size)
            ])
            cancelled = np.random.random(size=size) < (1 / (size * 2))
            transactions = transactions.append(pd.DataFrame({
                'session_id': np.full(session.num_transactions, int(session_id)),
                'timestamp': timestamp,
                'amount': amount.round(2),
                'cancelled': cancelled,
            }), ignore_index=True)
    transactions.index.name = 'transaction_id'
    del sessions['num_transactions']

    tables = {
        'users': _dtypes64(users.reset_index()),
        'sessions': _dtypes64(sessions.reset_index()),
        'transactions': _dtypes64(transactions.reset_index()),
    }
    return Metadata(DEMO_METADATA), tables
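# Hedged usage sketch: sample_relational_demo returns a Metadata object
# plus a dict of three linked tables (users -> sessions -> transactions):
#
#   metadata, tables = sample_relational_demo(size=10)
#   tables['users'].head()
#   tables['sessions'][['user_id', 'device', 'os']].head()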
dlys, offsets = vector_fit_line_to_phase(phs, fqs, valid)
if opts.plot:
    fq_plot = np.c_[[fqs for i in range(phs.shape[0])]].T
    p.subplot(211)
    p.plot(fqs, 2 * np.pi * fq_plot * dlys + offsets, lw=4)
    p.plot(fqs, 2 * np.pi * fq_plot * dlys, lw=4)
    p.plot(fqs, phs.flatten(), 'k')
    p.plot(fqs, np.angle(data[bl][pol][integration]))
    p.subplot(212)
    p.semilogy(fqs, np.abs(data[bl][pol][integration]))
    p.show()
bs.append(bl)
xs.append(x)
ys.append(y)
ds.append(dlys)
os.append(offsets)

# Now fit the data points xs, ys, ds
C = fit_plane(ds, xs, ys).T
O = fit_plane(os, xs, ys).T
import IPython; IPython.embed()

# apply fits
if opts.visualize:
    ds = np.array(ds).T
    os = np.array(os).T
    i, k = np.meshgrid(np.arange(-150, 150, 5), np.arange(-150, 150, 5))
    ii = i.flatten()
def Run_video(model, Fs, seg_results, num_frames, Mem_every=None, model_name='standard'):
    seg_result_idx = [i[3] for i in seg_results]
    instance_idx = 1
    b, c, T, h, w = Fs.shape
    results = []
    if np.all([len(i[0]) == 0 for i in seg_results]):
        print('No segmentation result of solo!')
        pred = torch.zeros((b, 1, T, h, w)).float().cuda()
        return [(pred, 1)]
    while True:
        if np.all([len(i[0]) == 0 for i in seg_results]):
            print('Run video over!')
            break
        if instance_idx > MAX_NUM:
            print('Max instance number!')
            break
        start_frame_idx = np.argmax([max(i[2]) if i[2] != [] else 0 for i in seg_results])
        start_frame = seg_result_idx[start_frame_idx]
        start_mask = seg_results[start_frame_idx][0][0].astype(np.uint8)
        # start_mask = cv2.resize(start_mask, (w, h))
        start_mask = torch.from_numpy(start_mask).cuda()
        if model_name in ('enhanced', 'enhanced_motion'):
            Os = torch.zeros((b, c, int(h / 4), int(w / 4)))
            first_frame = Fs[:, :, start_frame]
            first_mask = start_mask.cpu()
            if len(first_mask.shape) == 2:
                first_mask = first_mask.unsqueeze(0).unsqueeze(0)
            elif len(first_mask.shape) == 3:
                first_mask = first_mask.unsqueeze(0)
            first_frame = first_frame * first_mask.repeat(1, 3, 1, 1).type(torch.float)
            for i in range(b):
                mask_ = first_mask[i]
                mask_ = mask_.squeeze(0).cpu().numpy().astype(np.uint8)
                assert np.any(mask_)
                x, y, w_, h_ = cv2.boundingRect(mask_)
                patch = first_frame[i, :, y:(y + h_), x:(x + w_)].cpu().numpy()
                patch = patch.transpose(1, 2, 0)
                patch = cv2.resize(patch, (int(w / 4), int(h / 4)))
                patch = patch.transpose(2, 0, 1)
                patch = torch.from_numpy(patch)
                Os[i, :, :, :] = patch
        if model_name == 'varysize':
            oss = []
            first_frame = Fs[:, :, start_frame]
            first_mask = start_mask.cpu()
            if len(first_mask.shape) == 2:
                first_mask = first_mask.unsqueeze(0).unsqueeze(0)
            elif len(first_mask.shape) == 3:
                first_mask = first_mask.unsqueeze(0)
            first_frame = first_frame * first_mask.repeat(1, 3, 1, 1).type(torch.float)
            for i in range(b):
                mask_ = first_mask[i]
                mask_ = mask_.squeeze(0).cpu().numpy().astype(np.uint8)
                assert np.any(mask_)
                x, y, w_, h_ = cv2.boundingRect(mask_)
                patch = first_frame[i, :, y:(y + h_), x:(x + w_)].cpu().numpy()
                Os = torch.zeros((1, c, h_, w_))
                patch = patch.transpose(1, 2, 0)
                patch = patch.transpose(2, 0, 1)
                patch = torch.from_numpy(patch)
                Os[0, :, :, :] = patch
                oss.append(Os)
        Es = torch.zeros((b, 1, T, h, w)).float().cuda()
        Es[:, :, start_frame] = start_mask
        # to_memorize = [int(i) for i in np.arange(start_frame, num_frames, step=Mem_every)]
        to_memorize = [start_frame]
        for t in range(start_frame + 1, num_frames):  # frames after
            # memorize
            pre_key, pre_value = model([Fs[:, :, t - 1], Es[:, :, t - 1]])
            pre_key = pre_key.unsqueeze(2)
            pre_value = pre_value.unsqueeze(2)
            if t - 1 == start_frame:  # the first frame
                this_keys_m, this_values_m = pre_key, pre_value
            else:  # other frames
                this_keys_m = torch.cat([keys, pre_key], dim=2)
                this_values_m = torch.cat([values, pre_value], dim=2)
            # segment
            if model_name == 'enhanced':
                logits, _, _ = model([Fs[:, :, t], Os, this_keys_m, this_values_m])
            elif model_name == 'motion':
                logits, _, _ = model([Fs[:, :, t], this_keys_m, this_values_m, Es[:, :, t - 1]])
            elif model_name == 'aspp':
                logits, _, _ = model([Fs[:, :, t], this_keys_m, this_values_m, torch.round(Es[:, :, t - 1])])
            elif model_name == 'sp':
                logits, _, _ = model([Fs[:, :, t], this_keys_m, this_values_m, torch.round(Es[:, :, t - 1])])
            elif model_name == 'standard':
                logits, _, _ = model([Fs[:, :, t], this_keys_m, this_values_m])
            elif model_name == 'enhanced_motion':
                logits, _, _ = model([Fs[:, :, t], Os, this_keys_m, this_values_m, torch.round(Es[:, :, t - 1])])
            elif model_name == 'varysize':
                logits, _, _ = model([Fs[:, :, t], oss, this_keys_m, this_values_m])
            else:
                raise NotImplementedError
            em = F.softmax(logits, dim=1)[:, 1]  # B h w
            Es[:, 0, t] = em
            # check solo result
            pred = torch.round(em.float())
            if MODE == 'offline':
                save_path = os.path.join(INTER_PATH, 'STM', '{}_{}.png'.format(instance_idx, t + 1))
                img_array = pred.cpu().squeeze().numpy().astype(np.uint8)
                img_s = Image.fromarray(img_array)
                img_s.putpalette(PALETTE)
                img_s.save(save_path)
            if t in seg_result_idx:
                idx = seg_result_idx.index(t)
                this_frame_results = seg_results[idx]
                masks = this_frame_results[0]
                ious = []
                for mask in masks:
                    mask = mask.astype(np.uint8)
                    mask = torch.from_numpy(mask)
                    iou = get_video_mIoU(pred, mask)
                    ious.append(iou)
                if ious != []:
                    ious = np.array(ious)
                    reserve = list(range(len(ious)))
                    if sum(ious >= IOU1) >= 1:
                        same_idx = np.argmax(ious)
                        mask = torch.from_numpy(masks[same_idx]).cuda()
                        # if get_video_mIoU(mask, torch.round(Es[:, 0, t - 1])) \
                        #         > get_video_mIoU(pred, torch.round(Es[:, 0, t - 1])):
                        Es[:, 0, t] = mask
                        reserve.remove(same_idx)
                        # if abs(to_memorize[-1] - t) >= TO_MEMORY_MIN_INTERVAL:
                        to_memorize.append(t)
                    # for i, iou in enumerate(ious):
                    #     if iou >= IOU2:
                    #         if i in reserve:
                    #             reserve.remove(i)
                    reserve_result = []
                    for n in range(3):
                        reserve_result.append([this_frame_results[n][i] for i in reserve])
                    reserve_result.append(this_frame_results[3])
                    seg_results[idx] = reserve_result
            # update key and value
            if t - 1 in to_memorize:
                keys, values = this_keys_m, this_values_m
        # to_memorize = [start_frame - int(i) for i in np.arange(0, start_frame + 1, step=Mem_every)]
        to_memorize = [start_frame]
        for t in list(range(0, start_frame))[::-1]:  # frames before
            # memorize
            pre_key, pre_value = model([Fs[:, :, t + 1], Es[:, :, t + 1]])
            pre_key = pre_key.unsqueeze(2)
            pre_value = pre_value.unsqueeze(2)
            if t + 1 == start_frame:  # the first frame
                this_keys_m, this_values_m = pre_key, pre_value
            else:  # other frames
                this_keys_m = torch.cat([keys, pre_key], dim=2)
                this_values_m = torch.cat([values, pre_value], dim=2)
            # segment
            if model_name == 'enhanced':
                logits, _, _ = model([Fs[:, :, t], Os, this_keys_m, this_values_m])
            elif model_name == 'motion':
                logits, _, _ = model([Fs[:, :, t], this_keys_m, this_values_m, Es[:, :, t + 1]])
            elif model_name == 'aspp':
                logits, _, _ = model([Fs[:, :, t], this_keys_m, this_values_m, torch.round(Es[:, :, t + 1])])
            elif model_name == 'sp':
                logits, _, _ = model([Fs[:, :, t], this_keys_m, this_values_m, torch.round(Es[:, :, t + 1])])
            elif model_name == 'standard':
                logits, _, _ = model([Fs[:, :, t], this_keys_m, this_values_m])
            elif model_name == 'enhanced_motion':
                logits, _, _ = model([Fs[:, :, t], Os, this_keys_m, this_values_m, torch.round(Es[:, :, t + 1])])
            elif model_name == 'varysize':
                logits, _, _ = model([Fs[:, :, t], oss, this_keys_m, this_values_m])
            else:
                raise NotImplementedError
            em = F.softmax(logits, dim=1)[:, 1]  # B h w
            Es[:, 0, t] = em
            # check solo result
            pred = torch.round(em.float())
            if MODE == 'offline':
                save_path = os.path.join(INTER_PATH, 'STM', '{}_{}.png'.format(instance_idx, t + 1))
                img_array = pred.cpu().squeeze().numpy().astype(np.uint8)
                img_s = Image.fromarray(img_array)
                img_s.putpalette(PALETTE)
                img_s.save(save_path)
            if t in seg_result_idx:
                idx = seg_result_idx.index(t)
                this_frame_results = seg_results[idx]
                masks = this_frame_results[0]
                ious = []
                for mask in masks:
                    mask = mask.astype(np.uint8)
                    mask = torch.from_numpy(mask)
                    iou = get_video_mIoU(pred, mask)
                    ious.append(iou)
                if ious != []:
                    ious = np.array(ious)
                    reserve = list(range(len(ious)))
                    if sum(ious >= IOU1) >= 1:
                        same_idx = np.argmax(ious)
                        mask = torch.from_numpy(masks[same_idx]).cuda()
                        # if get_video_mIoU(mask, torch.round(Es[:, 0, t + 1])) \
                        #         > get_video_mIoU(pred, torch.round(Es[:, 0, t + 1])):
                        Es[:, 0, t] = mask
                        reserve.remove(same_idx)
                        # if abs(to_memorize[-1] - t) >= TO_MEMORY_MIN_INTERVAL:
                        to_memorize.append(t)
                    # for i, iou in enumerate(ious):
                    #     if iou >= IOU2:
                    #         if i in reserve:
                    #             reserve.remove(i)
                    reserve_result = []
                    for n in range(3):
                        reserve_result.append([this_frame_results[n][i] for i in reserve])
                    reserve_result.append(this_frame_results[3])
                    seg_results[idx] = reserve_result
            # update key and value
            if t + 1 in to_memorize:
                keys, values = this_keys_m, this_values_m
        for j in range(3):
            seg_results[start_frame_idx][j].pop(0)
        # pred = torch.round(Es.float())
        results.append((Es, instance_idx))
        instance_idx += 1
    return results
def term2rdfa(cur, prefixes, treename, stanza, term_id):
    if len(stanza) == 0:
        return set(), "Not found"
    curies = set()
    tree = {}
    cur.execute(f"""
      WITH RECURSIVE ancestors(parent, child) AS (
        VALUES ('{term_id}', NULL)
        UNION
        SELECT object AS parent, subject AS child
        FROM statements
        WHERE predicate = 'rdfs:subClassOf'
          AND object = '{term_id}'
        UNION
        SELECT object AS parent, subject AS child
        FROM statements, ancestors
        WHERE ancestors.parent = statements.stanza
          AND statements.predicate = 'rdfs:subClassOf'
          AND statements.object NOT LIKE '_:%'
      )
      SELECT * FROM ancestors""")
    for row in cur.fetchall():
        # print(row)
        parent = row["parent"]
        if not parent:
            continue
        curies.add(parent)
        if parent not in tree:
            tree[parent] = {"parents": [], "children": []}
        child = row["child"]
        if not child:
            continue
        curies.add(child)
        if child not in tree:
            tree[child] = {"parents": [], "children": []}
        tree[parent]["children"].append(child)
        tree[child]["parents"].append(parent)
    print("TREE ", len(tree.keys()))
    data = {"labels": {}}
    data[treename] = tree

    stanza.sort(key=lambda x: x["predicate"])
    for row in stanza:
        curies.add(row.get("subject"))
        curies.add(row.get("predicate"))
        curies.add(row.get("object"))
    curies.discard('')
    curies.discard(None)
    ps = set()
    for curie in curies:
        if not isinstance(curie, str) or len(curie) == 0 or curie[0] in ("_", "<"):
            continue
        prefix, local = curie.split(":")
        ps.add(prefix)

    labels = {}
    ids = "', '".join(curies)
    cur.execute(f"""SELECT subject, value
      FROM statements
      WHERE stanza IN ('{ids}')
        AND predicate = 'rdfs:label'
        AND value IS NOT NULL""")
    for row in cur:
        labels[row["subject"]] = row["value"]
    data["labels"] = labels
    for key in tree.keys():
        if key in labels:
            tree[key]["label"] = labels[key]

    label = term_id
    label_row = None
    for row in stanza:
        predicate = row["predicate"]
        if predicate == "rdfs:label":
            label_row = row
            label = label_row["value"]
            break

    annotation_bnodes = set()
    for row in stanza:
        if row["predicate"] == "rdf:type" and row["object"] == "owl:Axiom":
            annotation_bnodes.add(row["subject"])
    annotations = {}
    for row in stanza:
        subject = row["subject"]
        if subject not in annotation_bnodes:
            continue
        if subject not in annotations:
            annotations[subject] = {"row": {"stanza": row["stanza"]}, "rows": []}
        predicate = row["predicate"]
        if predicate == "rdf:type":
            continue
        elif predicate == "owl:annotatedSource":
            annotations[subject]["row"]["subject"] = row["object"]
            annotations[subject]["source"] = row
        elif predicate == "owl:annotatedProperty":
            annotations[subject]["row"]["predicate"] = row["object"]
            annotations[subject]["property"] = row
        elif predicate == "owl:annotatedTarget":
            annotations[subject]["row"]["object"] = row["object"]
            annotations[subject]["row"]["value"] = row["value"]
            annotations[subject]["row"]["datatype"] = row["datatype"]
            annotations[subject]["row"]["language"] = row["language"]
            annotations[subject]["target"] = row
        else:
            annotations[subject]["rows"].append(row)

    subject = row["subject"]
    si = curie2iri(prefixes, subject)
    S = label
    items = ["ul", {"id": "annotations", "class": "col-md"}]
    s2 = defaultdict(list)
    for row in stanza:
        if row["subject"] == term_id:
            s2[row["predicate"]].append(row)
    pcs = list(s2.keys())
    pcs.sort()
    for predicate in pcs:
        p = ["a", {"href": curie2href(predicate)}, labels.get(predicate, predicate)]
        os = []
        for row in s2[predicate]:
            if row == label_row:
                continue
            o = ["li", row2o(data, row)]
            for key, ann in annotations.items():
                if row != ann["row"]:
                    continue
                ul = ["ul"]
                for a in ann["rows"]:
                    ul.append(["li"] + row2po(data, a))
                o.append([
                    "small",
                    {"resource": key},
                    ["div", {"hidden": "true"},
                     row2o(data, ann["source"]),
                     row2o(data, ann["property"]),
                     row2o(data, ann["target"])],
                    ul,
                ])
                break
            os.append(o)
        items.append(["li", p, ["ul"] + os])

    hierarchy = term2tree(data, treename, term_id)
    h2 = ""  # term2tree(data, treename, term_id)
    term = [
        "div",
        {"resource": subject},
        ["h2", S],
        ["a", {"href": si}, si],
        ["div", {"class": "row"}, hierarchy, h2, items],
    ]
    return ps, term
def gameInfo(self, app_id):
    returned = {"name": "", "description": "", "price": "", "date": ""}
    result = graph.query("""%s SELECT ?name WHERE {
        games:%s games:gameName ?name .
    }""" % (self.prefixes, app_id))
    for x in result:
        returned["name"] = x[0]
    result = graph.query("""%s SELECT ?desc WHERE {
        games:%s games:description ?desc .
    }""" % (self.prefixes, app_id))
    for x in result:
        returned["description"] = x[0]
    result = graph.query("""%s SELECT ?price WHERE {
        games:%s games:price ?price .
    }""" % (self.prefixes, app_id))
    for x in result:
        returned["price"] = x[0]
    result = graph.query("""%s SELECT ?date WHERE {
        games:%s games:releaseDate ?date .
    }""" % (self.prefixes, app_id))
    for x in result:
        returned["date"] = x[0]
    result = graph.query("""%s SELECT ?os WHERE {
        games:%s games:OS ?os
    }""" % (self.prefixes, app_id))
    os = []
    for x in result:
        os.append(str(x[0]))
    returned["os"] = os
    result = graph.query("""%s SELECT ?tag ?tag_name WHERE {
        games:%s games:categorizedBy ?tag .
        ?tag games:genreCategory ?tag_name .
    }""" % (self.prefixes, app_id))
    tag = []
    tag_obj = []
    for x in result:
        tag.append(str(x[1]))
        tag_obj.append(self.unpre(x[0]))
    returned["tags"] = tag
    returned["tag_obj"] = tag_obj
    return returned
fs = []
ws = []
ss = []
aa = []
os = []
for file in files:
    if file[0:4] != 'fit_':
        continue
    fr = open('aotf/%s' % file, 'r')
    ln = fr.readline()
    fr.close()
    vs = ln.split()
    fs.append(float(vs[0]))
    ws.append(float(vs[1]))
    ss.append(float(vs[2]))
    aa.append(float(vs[3]))
    os.append(float(vs[4]))
# Endfor

# Perform the fits
aotfwc = np.polyfit(fs, ws, 2)
print(aotfwc)
aotfsc = np.polyfit(fs, ss, 2)
print(aotfsc)
aotfac = np.polyfit(fs, aa, 2)
print(aotfac)
aotfoc = np.polyfit(fs, os, 2)
print(aotfoc)

# Fit/plot the results
pl, ax = plt.subplots(1, 4, figsize=(14, 4))
ax[0].plot(fs, ws, 'o', label='Data')
def get_opsys(releases):
    os = []
    for r in releases:
        os.append(re.search('^[a-zA-Z ]*', r[0]).group(0).strip(" "))
    return os
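# Hedged usage sketch for get_opsys: each release is a sequence whose
# first element is the release name; the regex keeps the leading run of
# letters and spaces, so version numbers are dropped:
#
#   get_opsys([('Windows 10',), ('Ubuntu 20.04',)])
#   # -> ['Windows', 'Ubuntu']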
def zeropoint(input_file, band, output_file=None, usnob_thresh=15, alloptstars=False, quiet=False):
    """
    Calculate <band> zeropoint for stars in <input_file>.

    Expects a space-or-tab-delimited ascii input file with the first column RA,
    the second DEC, and the third instrumental magnitude. Header/comments should
    be #-demarcated, and all non-commented rows in the file should be numbers only.

    If an output_file name is given, it saves the entire catalog to that file.

    usnob_thresh: the minimum number of APASS+SDSS sources required before
    starting to use USNOB sources
    """
    if quiet == 'False':
        quiet = False
    if alloptstars == 'False':
        alloptstars = False
    usnob_thresh = int(usnob_thresh)

    # load the data and produce a catalog
    in_data = np.loadtxt(input_file)
    input_coords = in_data[:, :2]
    input_mags = in_data[:, 2]
    field_center, field_width = find_field(input_coords)
    c = catalog(field_center, max(field_width), input_coords=input_coords)
    # c = catalog(field_center, max(field_width), input_coords=input_coords, ignore=['panstarrs'])
    band_index = FILTER_PARAMS[band][-1]

    # check to see whether we need to use USNOB sources
    # mask = np.array(c.modes) < 2
    mask = np.array(c.modes) < 3
    if sum(mask) >= usnob_thresh:
        if not quiet:
            print('Using', sum(mask), 'APASS and/or SDSS sources.')
        cat_mags = c.SEDs[:, band_index][mask]
        cat_coords = c.coords[mask]
    else:
        if not quiet:
            print('Using', sum(mask), 'USNOB, APASS, and/or SDSS sources.')
        cat_mags = c.SEDs[:, band_index]
        cat_coords = c.coords

    if alloptstars:
        zp, mad, matches, ze = calc_zeropoint(input_coords, c.ccoords, input_mags,
                                              c.scat[:, band_index], return_zps=True)
        errors = c.serr
        catmag = c.scat
        modes = c.cmodes
    else:
        zp, mad, matches = calc_zeropoint(input_coords, cat_coords, input_mags,
                                          cat_mags, return_zps=False)
        errors = c.full_errors
        catmag = c.SEDs
        modes = c.modes

    # save matched catalog to file
    if output_file:
        oc, os, oe, om = [], [], [], []
        for i, match in enumerate(matches):
            oc.append(input_coords[i])
            if match >= 0:
                os.append(catmag[match])
                oe.append(errors[match])
                om.append(modes[match])
            else:
                os.append([99] * len(ALL_FILTERS))
                oe.append([9] * len(ALL_FILTERS))
                om.append(-1)
        save_catalog(oc, os, oe, om, output_file)
    return zp, mad
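# Hedged sketch of the input format zeropoint's docstring describes
# (whitespace-delimited RA, DEC, instrumental magnitude; '#' comments);
# the file name and values are made up for illustration:
#
#   # ra         dec        inst_mag
#   150.00123    2.20456    -8.132
#   150.00987    2.21011    -7.845
#
#   zp, mad = zeropoint('stars.txt', 'r')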
def Run_video_enhanced_varysize(model, batch, Mem_every=None, Mem_number=None, mode='train'):
    Fs, Ms, info = batch['Fs'], batch['Ms'], batch['info']
    num_frames = info['num_frames'][0].item()
    if Mem_every:
        to_memorize = [int(i) for i in np.arange(0, num_frames, step=Mem_every)]
    elif Mem_number:
        to_memorize = [int(round(i)) for i in np.linspace(0, num_frames, num=Mem_number + 2)[:-1]]
    else:
        raise NotImplementedError
    b, c, f, h, w = Fs.shape
    Es = torch.zeros((b, 1, f, h, w)).float().cuda()  # [1, 1, 50, 480, 864] [b, c, t, h, w]
    Es[:, :, 0] = Ms[:, :, 0]

    os = []
    first_frame = Fs[:, :, 0].detach()
    first_mask = Ms[:, :, 0].detach()
    first_frame = first_frame * first_mask.repeat(1, 3, 1, 1).type(torch.float)
    for i in range(b):
        mask_ = first_mask[i]
        mask_ = mask_.squeeze(0).cpu().numpy().astype(np.uint8)
        assert np.any(mask_)
        x, y, w_, h_ = cv2.boundingRect(mask_)
        patch = first_frame[i, :, y:(y + h_), x:(x + w_)].cpu().numpy()
        Os = torch.zeros((1, c, h_, w_))
        patch = patch.transpose(1, 2, 0)
        patch = patch.transpose(2, 0, 1)
        patch = torch.from_numpy(patch)
        Os[0, :, :, :] = patch
        os.append(Os)

    loss_video = torch.tensor(0.0).cuda()
    for t in range(1, num_frames):
        # memorize
        pre_key, pre_value = model([Fs[:, :, t - 1], Es[:, :, t - 1]])
        pre_key = pre_key.unsqueeze(2)
        pre_value = pre_value.unsqueeze(2)
        if t - 1 == 0:  # the first frame
            this_keys_m, this_values_m = pre_key, pre_value
        else:  # other frames
            this_keys_m = torch.cat([keys, pre_key], dim=2)
            this_values_m = torch.cat([values, pre_value], dim=2)
        # segment
        logits, p_m2, p_m3 = model([Fs[:, :, t], os, this_keys_m, this_values_m])  # B 2 h w
        em = F.softmax(logits, dim=1)[:, 1]  # B h w
        Es[:, 0, t] = em
        # update key and value
        if t - 1 in to_memorize:
            keys, values = this_keys_m, this_values_m
        # calculate loss on cuda
        if mode == 'train' or mode == 'val':
            Ms_cuda = Ms[:, 0, t].cuda()
            loss_video += (_loss(logits, Ms_cuda) + 0.5 * _loss(p_m2, Ms_cuda)
                           + 0.25 * _loss(p_m3, Ms_cuda))

    # calculate mIOU on cuda
    pred = torch.round(Es.float().cuda())
    if mode == 'train' or mode == 'val':
        video_mIoU = 0
        for n in range(len(Ms)):  # Nth batch
            # mIOU of video (t frames) for each batch
            video_mIoU = video_mIoU + get_video_mIoU(pred[n], Ms[n].float().cuda())
        video_mIoU = video_mIoU / len(Ms)  # mean IoU among batch
        return loss_video / num_frames, video_mIoU
    elif mode == 'test':
        return pred, Es
def analyze(filename):
    i = scipy.misc.imread(filename, 1)
    i = i.astype(np.int32)
    shapes = connected_components(i)

    # merge artifacts with what they came from
    changed = True
    while changed:
        changed = False
        compute_qualitative(shapes)
        to_remove = None
        for s, z in [(s, z) for s in shapes for z in shapes]:
            if s == z:
                continue
            if z.mass < tiny_threshold and s.mass >= z.mass and z in s.borders:
                s.merge_with(z)
                to_remove = z
                break
        if to_remove:
            changed = True
            shapes.remove(to_remove)

    if False:
        i[:] = WHITE
        for j, s in enumerate(shapes):
            s = s.mask
            i = i * (1 - s) + (j + 2) * s
        view(i * 30)

    ns = len(shapes)

    # equivalence classes of identical shapes
    identical = {}
    for s in shapes:
        new_class = True
        for j, k in identical.items():
            if new_class:
                for sp in k:
                    if s.same_shape(sp):
                        k.append(s)
                        new_class = False
                        break
        if new_class:
            identical[len(identical)] = [s]

    equivalent_mass = {}
    for j, k in identical.items():
        equivalent_mass[j] = float(sum([s.mass for s in k])) / len(k)

    next_label = 1
    for i, ki in identical.items():
        i_name = None
        for j in range(i):
            # is class i a rescaling of class j?
            rescaling = any([s.rescaled_shape(sp) for s in ki for sp in identical[j]])
            if rescaling:
                # take their name
                i_name = identical[j][0].name
                ratio = equivalent_mass[i] / equivalent_mass[j]
                if ratio < 1.0:
                    for s in ki:
                        s.scale = ratio
                else:
                    for s in identical[j]:
                        s.scale = 1.0 / ratio
                break
        if i_name is None:
            i_name = next_label
            next_label += 1
        for s in ki:
            s.name = i_name

    # build output string
    os = []
    for s in shapes:
        os.append("Shape(%i,%i,%i,%f)" % (s.x, s.y, s.name, s.scale))
    os = ','.join(os)
    os = os + "\n"
    for s in xrange(0, ns):
        for sp in xrange(0, ns):
            if shapes[sp] in shapes[s].contains:
                os = os + "contains(" + str(s) + ", " + str(sp) + ")\n"
    for s in xrange(0, ns):
        for sp in xrange(0, ns):
            if s < sp and shapes[sp] in shapes[s].borders:
                os = os + "borders(" + str(s) + ", " + str(sp) + ")\n"
    return os
filein1 = open(str(inputfile))
for line in filein1:
    n_freq += 1
filein1.close()

filein1 = open(str(inputfile), 'r')
spec_all = filein1.read()
filein1.close()
spec_text = spec_all.split('\n')
for i_freq in range(n_freq - 3):
    freq.append(float(spec_text[i_freq + 3].split()[1]))
    os.append(1.0)
    # os.append(float(spec_text[i_freq + 3].split()[1]))

for i_e in range(ne):
    emission_energy[i_e] = e_min + float(i_e) * de
    emission_intensity[i_e] = 0.0
    for i_freq in range(n_freq - 3):
        lineshape = 1 / 3.1415926 * e_sigma / ((emission_energy[i_e] - freq[i_freq])**2 + e_sigma**2)
        tmp_os = os[i_freq] * lineshape
        emission_intensity[i_e] = emission_intensity[i_e] + tmp_os

filein4 = open(str(outputfile), 'w')
df['datetime'] = [get_datetime(row) for row in df.iloc]
df = df.sort_values('datetime')

events_flat = []
non_unique_actors = []
for i, row in enumerate(df.iloc):
    m = row['MD']
    os = []
    non_unique_actors.append(m)
    # extract all chimpanzees that observed the event.
    for j in range(8, df.shape[1]):
        if isinstance(row.iloc[j], str):
            os.append(row.iloc[j][:2])
            events_flat.append([m, row.iloc[j][:2], row['Day'], row['Novel technique']])
        else:
            # A missing string value means it's the end of the list of observing
            # chimpanzees. We also want to record the chimpanzee exhibiting the
            # event to "itself", for the purposes of tracking non-observed events.
            events_flat.append([m, m, row['Day'], row['Novel technique']])
            break
    # add to the record of all chimpanzees.
    non_unique_actors += os

df_events = pandas.DataFrame(data=events_flat,
                             columns=['Mediator', 'Observer', 'Day', 'Novel technique'])
def build_model(self):
    m_auditory = tf.placeholder(tf.float32, [self.batch_size, self.mem_size, self.dim_wav], name="m")
    q_visual = tf.placeholder(tf.float32, [self.batch_size, self.dim_img], name="q")
    y = tf.placeholder(tf.float32, [self.batch_size, 1], name="y")

    m_ = tf.reshape(m_auditory, [-1, self.dim_wav])  # sample * mem_size * dim -> (sample * mem_size) * dim
    m_input = tf.nn.xw_plus_b(m_, self.A, self.b_A)
    m_input = tf.reshape(m_input, [self.batch_size, self.mem_size, self.dim_mem])
    m_input = tf.transpose(m_input, perm=[0, 2, 1]) + self.T_A  # sample * dim_mem * mem_size
    m_input = tf.transpose(m_input, perm=[0, 2, 1])  # sample * mem_size * dim_mem

    m_output = tf.nn.xw_plus_b(m_, self.C, self.b_C)
    m_output = tf.reshape(m_output, [self.batch_size, self.mem_size, self.dim_mem])
    m_output = tf.transpose(m_output, perm=[0, 2, 1]) + self.T_C  # sample * dim_mem * mem_size
    m_output = tf.transpose(m_output, perm=[0, 2, 1])  # sample * mem_size * dim_mem

    u = tf.nn.xw_plus_b(q_visual, self.B, self.b_B)
    os = []
    for _ in range(self.n_hop):
        u = tf.expand_dims(u, -1)  # sample * dim_mem -> sample * dim_mem * 1
        m_prob = tf.batch_matmul(m_input, u)
        m_prob = tf.squeeze(m_prob)
        m_prob = tf.nn.softmax(m_prob)  # sample * mem_size
        weighted_m = tf.mul(m_output, tf.tile(tf.expand_dims(m_prob, 2), [1, 1, self.dim_mem]))
        o = tf.reduce_sum(weighted_m, 1)  # sample * dim_mem
        u = tf.add(tf.squeeze(u), o)
        if self.nl:
            u = tf.nn.elu(u)
        os.append(u)

    train_hidden = valid_hidden = os[-1]
    train_hidden = tf.nn.dropout(train_hidden, self.do_prob)
    y_hat = tf.nn.xw_plus_b(train_hidden, self.W_o, self.b_o)
    v_y_hat = tf.nn.xw_plus_b(valid_hidden, self.W_o, self.b_o)

    loss = tf.reduce_mean(tf.square(y_hat - y))
    regularizers = (tf.nn.l2_loss(self.A) + tf.nn.l2_loss(self.b_A) +
                    tf.nn.l2_loss(self.B) + tf.nn.l2_loss(self.b_B) +
                    tf.nn.l2_loss(self.C) + tf.nn.l2_loss(self.b_C) +
                    tf.nn.l2_loss(self.T_A) + tf.nn.l2_loss(self.T_C) +
                    tf.nn.l2_loss(self.W_o) + tf.nn.l2_loss(self.b_o))
    loss += 5e-3 * regularizers
    lr = tf.train.exponential_decay(
        self.learning_rate,  # Base learning rate.
        self.global_step,    # Current index
        28,                  # Decay step.
        0.96,                # Decay rate.
        staircase=True)
    train_op = tf.train.AdamOptimizer(lr).minimize(loss, global_step=self.global_step)
    return m_auditory, q_visual, y, loss, train_op, lr, v_y_hat