def check_compatible(client, min_version=None, max_version=None):
    """Checks if a kazoo client is backed by a zookeeper server version.

    This check will verify that the zookeeper server version that the client
    is connected to satisfies a given minimum version (inclusive) and
    maximum (inclusive) version range. If the server is not in the provided
    version range then an exception is raised indicating this.
    """
    server_version = None
    if min_version:
        server_version = tuple(int(a) for a in client.server_version())
        min_version = tuple(int(a) for a in min_version)
        if server_version < min_version:
            pretty_server_version = ".".join(str(a) for a in server_version)
            min_version = ".".join(str(a) for a in min_version)
            raise exc.IncompatibleVersion("Incompatible zookeeper version"
                                          " %s detected, zookeeper >= %s"
                                          " required" % (pretty_server_version,
                                                         min_version))
    if max_version:
        if server_version is None:
            server_version = tuple(int(a) for a in client.server_version())
        max_version = tuple(int(a) for a in max_version)
        if server_version > max_version:
            pretty_server_version = ".".join(str(a) for a in server_version)
            max_version = ".".join(str(a) for a in max_version)
            raise exc.IncompatibleVersion("Incompatible zookeeper version"
                                          " %s detected, zookeeper <= %s"
                                          " required" % (pretty_server_version,
                                                         max_version))
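# Usage sketch for check_compatible, assuming kazoo is installed and a
# ZooKeeper server is reachable at the (hypothetical) address below.
from kazoo.client import KazooClient

client = KazooClient(hosts='127.0.0.1:2181')
client.start()
try:
    # Accept any server in the inclusive range 3.4.0 .. 3.5.99
    check_compatible(client, min_version=(3, 4, 0), max_version=(3, 5, 99))
finally:
    client.stop()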
def stop(self):
    """Stop the daemon."""
    # Get the pid from the pidfile
    try:
        with open(self.pidfile, 'r') as pf:
            pid = int(pf.read().strip())
    except IOError:
        pid = None

    if not pid:
        message = "pidfile %s does not exist. Daemon not running?\n"
        sys.stderr.write(message % self.pidfile)
        return  # not an error in a restart

    # Try killing the daemon process
    try:
        while True:
            os.kill(pid, SIGTERM)
            time.sleep(0.1)
    except OSError as err:
        err = str(err)
        if err.find("No such process") > 0:
            if os.path.exists(self.pidfile):
                os.remove(self.pidfile)
        else:
            print(err)
            sys.exit(1)
def main():
    ptt_dir = '/tmp2/GorsachiusMelanolophus/ptt_posts_new/no_sponsored/'
    imgs_dir = '/tmp2/GorsachiusMelanolophus/ptt_imgs/no_sponsored/'
    start = int(sys.argv[1])
    end = int(sys.argv[2])
    fp = open('../img_num/' + str(start) + '.txt', 'a')
    for i in range(start, end):
        try:
            post_path = ptt_dir + str(i) + '.p'
            post = pickle.load(open(post_path, 'rb'))
            url = ptt_url + post['href']
            webpage = get_webpage(url)
            imgs, blog_url = parse_post(webpage)
            if imgs:
                print(f'{i}:{len(imgs)}', file=fp)
                save(imgs, imgs_dir + str(i))
            elif blog_url:
                webpage = get_webpage(blog_url)
                imgs = get_imgs_blog(webpage)
                if imgs:
                    print(f'{i}:{len(imgs)}', file=fp)
                    save(imgs, imgs_dir + str(i))
        except KeyboardInterrupt:
            return 0
        except Exception as e:
            print(e)
def IncrementVersion(self):
    """Updates the version file by incrementing the patch component."""
    if not self.incr_type or self.incr_type not in self.VALID_INCR_TYPES:
        raise VersionUpdateException('Need to specify the part of the version'
                                     ' to increment')

    if self.incr_type == 'chrome_branch':
        self.chrome_branch = str(int(self.chrome_branch) + 1)

    # Increment build_number for 'chrome_branch' incr_type to avoid
    # crbug.com/213075.
    if self.incr_type in ('build', 'chrome_branch'):
        self.build_number = str(int(self.build_number) + 1)
        self.branch_build_number = '0'
        self.patch_number = '0'
    elif self.incr_type == 'branch' and self.patch_number == '0':
        self.branch_build_number = str(int(self.branch_build_number) + 1)
    else:
        self.patch_number = str(int(self.patch_number) + 1)
    return self.VersionString()
def map(self):
    mc = MongoClient('ec2-52-0-148-244.compute-1.amazonaws.com', 27017)
    dbmc = mc.genid
    # Atomically increment a shared counter to get a unique id prefix
    idoc = dbmc.gentable.find_one_and_update(
        filter={}, update={"$inc": {"score": 1}}, upsert=True)
    k = Key(self.bucket)
    y = stopwords.words('english')
    i = 1
    strx = str(int(idoc['score']))
    strz = None
    filestring = ""
    for line in sys.stdin:
        # sys.stdin already yields text (str) in Python 3
        pattern = re.compile(r'\b(' + r'|'.join(y) + r')\b\s*')
        line = pattern.sub('', line)
        tokenizer = RegexpTokenizer(r'\w+')
        words = tokenizer.tokenize(line)
        strz = strx + 'a' + str(i)
        k.key = strz
        filestring = line + '\n'
        k.set_contents_from_string(filestring)
        for word in words:
            print('%s\t%s' % (word.strip(), strz))
        i += 1
def exp(inF1, inF2):
    G = Gene(inF1)
    ouFile = open(inF1 + '.exp', 'w')
    ouFile.write('Gene\tMock\tMERS\n')
    D = {}
    inFile = open(inF2)
    head = inFile.readline()
    for line in inFile:
        line = line.strip()
        fields = line.split('\t')
        gene = fields[1]
        D.setdefault(gene, [])
        # mock = (float(fields[2]) + float(fields[3]))/2
        # rsv20h = (float(fields[14]) + float(fields[15]))/2
        Mock = np.median([float(fields[2]), float(fields[3]), float(fields[4])])
        MERS = np.median([float(fields[5]), float(fields[6]), float(fields[7])])
        D[gene].append([Mock, MERS])
    inFile.close()
    for g in G:
        if g in D:
            if len(D[g]) > 1:
                # print(D[g])
                pass
            ouFile.write(g + '\t' + str(D[g][0][0]) + '\t' + str(D[g][0][1]) + '\n')
    ouFile.close()
def __del__(self):
    """Destructor. Removes the pidfile, if it was created by ourselves."""
    if not self.created:
        return
    if not os.path.exists(self.filename):
        if self.verbose > 3:
            log.debug(
                _("Pidfile '%s' doesn't exist, not removing."),
                self.filename)
        return
    if not self.auto_remove:
        if self.verbose > 3:
            log.debug(
                _("Auto removing disabled, not deleting '%s'."),
                self.filename)
        return
    if self.verbose > 1:
        log.debug(_("Removing pidfile '%s' ..."), self.filename)
    if self.simulate:
        if self.verbose > 1:
            log.debug(_("Just kidding .."))
        return
    try:
        os.remove(self.filename)
    except OSError as e:
        log.err(
            _("Could not delete pidfile %r: %s"),
            self.filename, str(e))
    except Exception as e:
        self.handle_error(str(e), e.__class__.__name__, True)
def generate_inventory_data(self, inventories, is_online=1):
    start_day = datetime.date.today()
    end_day = start_day + datetime.timedelta(days=89)
    manual_confirm_count = []
    if is_online == 1:
        day = start_day
        while day <= end_day:
            month = int("{}{:0>2d}".format(day.year, day.month))
            for inventory in inventories:
                if inventory.month == month:
                    manual_confirm_count.append(str(inventory.get_day(day.day)))
                    break
            day = day + datetime.timedelta(days=1)
    else:
        # manual_confirm_count = ['0' for i in range(90)]
        manual_confirm_count = '0' * 90

    data = {}
    data['chain_id'] = CHAIN_ID
    if inventories[0].merchant_id in SPEC_STOCK_PUSH:
        data['ota_id'] = SPEC_STOCK_PUSH[inventories[0].merchant_id]
    else:
        data['ota_id'] = 0
    data['hotel_id'] = str(inventories[0].hotel_id)
    data['room_type_id'] = str(inventories[0].roomtype_id)
    data['manual_confirm_counts'] = '|'.join(manual_confirm_count)
    data['start_date'] = start_day
    data['end_date'] = end_day
    data['is_soldout_auto_close'] = 1
    return data
def widget_attrs(self, widget):
    par_attrs = super(Html5IntegerField, self).widget_attrs(widget)
    if self.max_value is not None:
        par_attrs.update({'max': str(self.max_value)})
    if self.min_value is not None:
        par_attrs.update({'min': str(self.min_value)})
    return par_attrs
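# Minimal usage sketch (hypothetical form), assuming Html5IntegerField is
# the django.forms.IntegerField subclass the method above belongs to: the
# declared bounds surface as HTML5 min/max attributes on the rendered input.
from django import forms

class RatingForm(forms.Form):
    # Renders roughly as <input type="number" name="rating" min="1" max="5" ...>
    rating = Html5IntegerField(min_value=1, max_value=5,
                               widget=forms.NumberInput)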
def showScaledSqFlucts(modes, *args, **kwargs):
    """Show scaled square fluctuations using :func:`~matplotlib.pyplot.plot`.
    Modes or mode sets given as additional arguments will be scaled to have
    the same mean squared fluctuations as *modes*."""

    import matplotlib.pyplot as plt
    sqf = calcSqFlucts(modes)
    mean = sqf.mean()
    args = list(args)
    modesarg = []
    i = 0
    while i < len(args):
        if isinstance(args[i], (VectorBase, ModeSet, NMA)):
            modesarg.append(args.pop(i))
        else:
            i += 1
    show = [plt.plot(sqf, *args, label=str(modes), **kwargs)]
    plt.xlabel('Indices')
    plt.ylabel('Square fluctuations')
    for modes in modesarg:
        sqf = calcSqFlucts(modes)
        scalar = mean / sqf.mean()
        show.append(plt.plot(sqf * scalar, *args,
                             label='{0} (x{1:.2f})'.format(str(modes), scalar),
                             **kwargs))
    if SETTINGS['auto_show']:
        showFigure()
    return show
def showNormedSqFlucts(modes, *args, **kwargs):
    """Show normalized square fluctuations via :func:`~matplotlib.pyplot.plot`."""

    import matplotlib.pyplot as plt
    sqf = calcSqFlucts(modes)
    args = list(args)
    modesarg = []
    i = 0
    while i < len(args):
        if isinstance(args[i], (VectorBase, ModeSet, NMA)):
            modesarg.append(args.pop(i))
        else:
            i += 1
    show = [plt.plot(sqf / (sqf ** 2).sum() ** 0.5, *args,
                     label='{0}'.format(str(modes)), **kwargs)]
    plt.xlabel('Indices')
    plt.ylabel('Square fluctuations')
    for modes in modesarg:
        sqf = calcSqFlucts(modes)
        show.append(plt.plot(sqf / (sqf ** 2).sum() ** 0.5, *args,
                             label='{0}'.format(str(modes)), **kwargs))
    if SETTINGS['auto_show']:
        showFigure()
    return show
def process(self, device, results, log):
    """collect snmp information from this device"""
    log.info('processing %s for device %s', self.name(), device.id)
    getdata, tabledata = results
    topos = tabledata.get("topo")
    deviceip = tabledata.get("deviceip")  # renamed: previously shadowed the `device` argument

    # Debug: print data retrieved from device.
    log.debug("Get data = %s", getdata)
    log.debug("Table data = %s", tabledata)

    # If no data retrieved return nothing.
    if not topos:
        log.warn('No data collected from %s for the %s plugin',
                 device.id, self.name())
        return
    if not deviceip:
        log.warn('No data collected from %s for the %s plugin',
                 device.id, self.name())
        return

    rm = self.relMap()
    for oid, data in topos.items():
        try:
            om = self.objectMap(data)
            om.id = self.prepId(om.ipaddr)
            om.macaddr = self.asmac(om.macaddr)
            if om.chassistype not in self.type:
                om.chassistype = 1
            om.chassistype = self.type[om.chassistype]
            om.localint = "Slot " + str(om.slot) + ", Port " + str(om.port)
            if om.ipaddr in deviceip:
                om.clear()
        except AttributeError:
            continue
        rm.append(om)
    return rm
def showOverlapTable(modes_x, modes_y, **kwargs):
    """Show overlap table using :func:`~matplotlib.pyplot.pcolor`.
    *modes_x* and *modes_y* are sets of normal modes, and correspond to
    x and y axes of the plot.  Note that mode indices are incremented by 1.
    List of modes is assumed to contain a set of contiguous modes from
    the same model.

    Default arguments for :func:`~matplotlib.pyplot.pcolor`:

    * ``cmap=plt.cm.jet``
    * ``norm=plt.Normalize(0, 1)``"""

    import matplotlib.pyplot as plt
    overlap = abs(calcOverlap(modes_y, modes_x))
    if overlap.ndim == 0:
        overlap = np.array([[overlap]])
    elif overlap.ndim == 1:
        overlap = overlap.reshape((modes_y.numModes(), modes_x.numModes()))
    cmap = kwargs.pop('cmap', plt.cm.jet)
    # plt.normalize was removed from matplotlib; plt.Normalize is the
    # current spelling of the same class
    norm = kwargs.pop('norm', plt.Normalize(0, 1))
    show = (plt.pcolor(overlap, cmap=cmap, norm=norm, **kwargs),
            plt.colorbar())
    x_range = np.arange(1, modes_x.numModes() + 1)
    plt.xticks(x_range - 0.5, x_range)
    plt.xlabel(str(modes_x))
    y_range = np.arange(1, modes_y.numModes() + 1)
    plt.yticks(y_range - 0.5, y_range)
    plt.ylabel(str(modes_y))
    plt.axis([0, modes_x.numModes(), 0, modes_y.numModes()])
    if SETTINGS['auto_show']:
        showFigure()
    return show
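# Usage sketch for the ProDy plotting helpers above (assumes ProDy is
# installed and PDB entry 1ubi can be fetched): build two GNMs with
# different cutoffs, then compare fluctuation profiles and mode overlaps.
from prody import parsePDB, GNM

calphas = parsePDB('1ubi', subset='ca')
gnm10 = GNM('cutoff 10A')
gnm10.buildKirchhoff(calphas, cutoff=10.0)
gnm10.calcModes()
gnm15 = GNM('cutoff 15A')
gnm15.buildKirchhoff(calphas, cutoff=15.0)
gnm15.calcModes()
showScaledSqFlucts(gnm10[:3], gnm15[:3])  # fluctuations on a common scale
showOverlapTable(gnm10[:10], gnm15[:10])  # pairwise overlap of the mode sets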
def gen_cut(m, a):
    md = '## ' + a['title']
    if a['qty'] > 1:
        md += ' (x' + str(a['qty']) + ')'
    md += '\n\n'
    # vitamins
    if len(a['vitamins']) > 0:
        a['vitamins'].sort(key=vitamin_call, reverse=False)
        md += '### Vitamins\n\n'
        md += 'Qty | Vitamin | Image\n'
        md += '--- | --- | ---\n'
        for v in a['vitamins']:
            md += str(v['qty']) + ' | '
            md += '[' + v['title'] + ']() | '
            md += '![](../vitamins/images/' + views.view_filename(v['title'] + '_view') + ') | '
            md += '\n'
        md += '\n'
    # fabrication steps
    if len(a['steps']) > 0:
        md += '### Fabrication Steps\n\n'
        for step in a['steps']:
            md += str(step['num']) + '. ' + step['desc'] + '\n'
            for view in step['views']:
                md += '![](../cutparts/images/' + views.view_filename(a['title'] + '_step' + str(step['num']) + '_' + view['title']) + ')\n'
            md += '\n'
        md += '\n'
    return md
def consolidate_results(self):
    dicts = []
    for file in os.listdir(self.results_directory):
        if file.startswith(self.data_name + '_results_'):
            f1 = open(self.results_directory + file, 'r')
            my_dict = eval(f1.read())
            dicts.append(my_dict)
    run_nums = [' ']
    run_nums.extend([str(r) for r in range(0, len(dicts))])
    print('Found ' + str(len(dicts)) + ' result sets')
    full_results_loc = self.results_directory + self.data_name + '_full_results_transpose.csv'
    with open(full_results_loc, 'w', newline='') as ofile:
        writer = csv.writer(ofile, delimiter=',')
        writer.writerow(run_nums)
        for key in dicts[0].keys():
            writer.writerow([key] + [d[key] for d in dicts])
    # this file has all the info - but to bring into pandas we want to transpose the data
    df = pd.read_csv(full_results_loc, index_col=0)
    df2 = df.transpose()
    # save off the results file
    full_results_loc2 = self.results_directory + self.data_name + '_full_results.csv'
    print('Saving: ' + full_results_loc2)
    df2.to_csv(full_results_loc2, sep=',')
def fitDataButton(self):
    plotName = self.popup_win.getPlotName()
    ymin = self.data.get_min_sensor_value(str(plotName))
    ymax = self.data.get_max_sensor_value(str(plotName))
    tmax = self.data.get_max_sensor_value('time')
    self.figures[plotName + "plot"].setRange(xRange=(0, tmax), yRange=(ymin, ymax))
    self.popup_win.close()
def updateLineEdits(self):
    i = self.options.index(self.dropdown_box.currentText())
    # viewRange() returns the currently visible range as [[xmin, xmax], [ymin, ymax]]
    currRange = self.plot_items[i].viewRange()
    self.xmin.setText(str(currRange[0][0]))
    self.xmax.setText(str(currRange[0][1]))
    self.ymin.setText(str(currRange[1][0]))
    self.ymax.setText(str(currRange[1][1]))
def sendReport(self):
    name = str(self.mainwindow.profile().handle)
    bestname = str(self.name.text())
    os = ostools.osVer()
    full = ostools.platform.platform()
    python = ostools.platform.python_version()
    qt = core.qVersion()
    msg = str(self.textarea.toPlainText())
    if len(bestname) <= 0 or len(msg) <= 0:
        msgbox = gui.QMessageBox()
        msgbox.setStyleSheet(self.mainwindow.theme["main/defaultwindow/style"])
        msgbox.setText("You must fill out all fields first!")
        msgbox.setStandardButtons(gui.QMessageBox.Ok)
        ret = msgbox.exec_()
        return
    widgets.QDialog.accept(self)
    data = urllib.parse.urlencode({"name": name, "version": version._pcVersion,
                                   "bestname": bestname, "os": os, "platform": full,
                                   "python": python, "qt": qt, "msg": msg})
    print("Sending...")
    # urlopen expects POST data as bytes in Python 3
    f = urllib.request.urlopen("http://distantsphere.com/pc/reporter.php",
                               data.encode('utf-8'))
    text = f.read().decode('utf-8', 'replace')
    print(text)
    if text == "success!":
        print("Sent!")
    else:
        print("Problems ):")
def recognise_eHentai(link, path):
    url = str(link)
    page = urllib.request.urlopen(url).read()
    soup = BeautifulSoup(page, 'html.parser')
    name = soup.findAll('title')
    name = name[0].get_text()  # get_text() already returns str
    name = str(name)
    path = path + '\\' + name
    download_eHentai(link, path)
    pages = soup.find_all('span')
    pages = pages[1].get_text()
    pages = int(pages)
    z = 0
    while pages > z:
        z = z + 1
        sopa = soup.find('div', 'sn')
        sopa = sopa.find_all('a')
        sopa = sopa[2].get('href')
        url = str(sopa)
        download_eHentai(url, path)
        page = urllib.request.urlopen(url).read()
        soup = BeautifulSoup(page, 'html.parser')
        sopa = soup.find('div', 'sn')
        sopa = sopa.find_all('a')
        sopa = sopa[2].get('href')
        download_eHentai(sopa, path)
def urbanairship_set_channels(request):
    # Example body:
    # {'channel': 'EMBMobility',
    #  'device': ['8c85a4add9947ff507158251a4a639084e9f8f6bf5260433d148257a625ebd85'],
    #  'app_key': '3gutVRHDQ0O25Hlu2wc9lg', 'operation': "delete"}
    data = eval(str(request.body))
    devices = data['device']
    app_key = data['app_key']
    channel = data['channel']
    operation = data['operation']  # add/delete/edit
    if operation == 'edit':
        new_channel = data['new_channel']
    try:
        devices = APNSDevices.objects.filter(app_key=app_key,
                                             registration_id__in=devices)
        for device in devices:
            if device.channels:
                if operation == 'add':
                    channels = device.channels + channel + '##'
                elif operation == 'delete':
                    channels = device.channels.replace(channel + '##', '')
                elif operation == 'edit':
                    channels = device.channels.replace(channel + '##', new_channel + '##')
                device.channels = channels
            else:
                if operation == 'add':
                    device.channels = channel + '##'
            device.save()
    except Exception as e:
        print(str(e))
def main():
    # set up loop
    start_date = datetime.date(2003, 1, 1)
    end_date = datetime.date(2008, 12, 31)
    d = start_date
    delta = datetime.timedelta(days=1)
    while d <= end_date:
        # set up url
        url = ('http://www.cloud-net.org/quicklooks/data/chilbolton/products/classification/'
               + d.strftime("%Y")
               + '/' + d.strftime("%Y%m%d")
               + '_chilbolton_classification.png')
        # check if exists (urlopen raises HTTPError for 4xx/5xx responses)
        try:
            urllib.request.urlopen(url)
        except urllib.error.HTTPError:
            print("No data for", d.strftime("%Y%m%d"), "continuing...")
            d += delta
            continue
        urllib.request.urlretrieve(url, d.strftime("%Y%m%d") + '.png')
        # increment date
        print("Successfully retrieved", d.strftime("%Y%m%d"), "moving on...")
        d += delta
def _get_sales_data(self, cr, uid, period_ids, curr_period, sale_id, context=None):
    if not curr_period:
        curr_p = self.pool.get('account.period').find(cr, uid, context=context)[0]
        period_ids = [x for x in period_ids if x != curr_p]
    if not sale_id:
        sql = """
            select ml.partner_id as pid, ml.period_id as period, p.name as pnm,
                   p.is_company as company, sum(ml.credit - ml.debit) as amount
            from account_move_line ml
                left join res_partner p on (ml.partner_id = p.id)
                left join account_account ac on (ml.account_id = ac.id)
            where ml.period_id in %s and ac.reports = True
            group by ml.partner_id, ml.period_id, p.name, p.is_company
            order by ml.partner_id, ml.period_id
        """ % (str(tuple(period_ids)))
    else:
        sql = """
            select ml.partner_id as pid, ml.period_id as period, p.name as pnm,
                   p.is_company as company, sum(ml.credit - ml.debit) as amount
            from account_move_line ml
                left join account_invoice inv on (ml.move_id = inv.move_id)
                left join res_partner p on (ml.partner_id = p.id)
                left join account_account ac on (ml.account_id = ac.id)
            where ml.period_id in %s and inv.user_id = %s and ac.reports = True
            group by ml.partner_id, ml.period_id, p.name, p.is_company
            order by ml.partner_id, ml.period_id
        """ % (str(tuple(period_ids)), sale_id)
    cr.execute(sql)
    return cr.dictfetchall()
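# A safer sketch of the same query pattern (assuming a psycopg2-style
# cursor `cr`, which OpenERP provides): let the driver substitute the
# parameters instead of splicing them into the SQL with % formatting.
# psycopg2 adapts a Python tuple to a parenthesized list for IN clauses.
cr.execute(
    "select ml.partner_id, sum(ml.credit - ml.debit) as amount "
    "from account_move_line ml "
    "where ml.period_id in %s "
    "group by ml.partner_id",
    (tuple(period_ids),))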
def get_scene(base_path, scenes_dir, curr_i, configs):
    # curr_i = (scene_i + 1) % scene_count
    # scene_i = curr_i
    config_path = os.path.join(scenes_dir, 'ConfigScene' + str(curr_i) + '.ini')
    if configs is not None:
        config_path = configs[curr_i]
    print("config_path: ", config_path)
    config = ConfigParser.ConfigParser()
    print("config read: ", config.read(config_path))
    print("config sections: ", config.sections())
    models_count = int(map_section(config, "MODELS")['number'].strip('\"'))
    model_path = [""] * models_count
    model_trans_path = [""] * models_count
    for i in range(models_count):
        model_path[i] = base_path + map_section(config, "MODELS")['model_' + str(i)].strip('\"')
        # root, ext = os.path.splitext(model_path[i])
        # model_path[i] = root + '_0' + ext
        model_trans_path[i] = base_path + map_section(config, "MODELS")['model_' + str(i) + '_groundtruth'].strip('\"')
    scene_path = base_path + map_section(config, "SCENE")['path'].strip('\"')
    root, ext = os.path.splitext(scene_path)
    scene_path = root + '_0.1' + ext
    # scene_path = root + '_0' + ext
    return model_path, model_trans_path, scene_path
def get(self, now=None):
    """Shows template displaying the configured cron jobs."""
    if not now:
        now = datetime.datetime.now()
    values = {'request': self.request}
    cron_info = _ParseCronYaml()
    values['cronjobs'] = []
    values['now'] = str(now)
    if cron_info and cron_info.cron:
        for entry in cron_info.cron:
            job = {}
            values['cronjobs'].append(job)
            if entry.description:
                job['description'] = entry.description
            else:
                job['description'] = '(no description)'
            if entry.timezone:
                job['timezone'] = entry.timezone
            job['url'] = entry.url
            job['schedule'] = entry.schedule
            schedule = groctimespecification.GrocTimeSpecification(entry.schedule)
            matches = schedule.GetMatches(now, 3)
            job['times'] = []
            for match in matches:
                job['times'].append(
                    {'runtime': match.strftime("%Y-%m-%d %H:%M:%SZ"),
                     'difference': str(match - now)})
    self.generate('cron.html', values)
def test_invalid_final_lookup(self):
    qs = Book.objects.prefetch_related('authors__name')
    with self.assertRaises(ValueError) as cm:
        list(qs)
    self.assertTrue('prefetch_related' in str(cm.exception))
    self.assertTrue("name" in str(cm.exception))
def get_ruler_distances(self, x1, y1, x2, y2):
    mode = self.t_drawparams.get('units', 'arcmin')
    try:
        image = self.fitsimage.get_image()
        if mode == 'arcmin':
            # Calculate RA and DEC for the three points
            # origination point
            ra_org, dec_org = image.pixtoradec(x1, y1)
            # destination point
            ra_dst, dec_dst = image.pixtoradec(x2, y2)
            # "heel" point making a right triangle
            ra_heel, dec_heel = image.pixtoradec(x2, y1)
            text_h = image.get_starsep_RaDecDeg(ra_org, dec_org, ra_dst, dec_dst)
            text_x = image.get_starsep_RaDecDeg(ra_org, dec_org, ra_heel, dec_heel)
            text_y = image.get_starsep_RaDecDeg(ra_heel, dec_heel, ra_dst, dec_dst)
        else:
            dx = abs(x2 - x1)
            dy = abs(y2 - y1)
            dh = math.sqrt(dx ** 2 + dy ** 2)
            text_x = str(dx)
            text_y = str(dy)
            text_h = "%.3f" % dh
    except Exception as e:
        text_h = 'BAD WCS'
        text_x = 'BAD WCS'
        text_y = 'BAD WCS'
def create_cube_request(modelname, px, py, pz, rr, rp, ry, sx, sy, sz):
    """Create a SpawnModelRequest with the parameters of the cube given.
    modelname: name of the model for gazebo
    px py pz: position of the cube (and its collision cube)
    rr rp ry: rotation (roll, pitch, yaw) of the model
    sx sy sz: size of the cube"""
    cube = deepcopy(sdf_cube)
    # Replace size of model
    size_str = str(round(sx, 3)) + " " + \
        str(round(sy, 3)) + " " + str(round(sz, 3))
    cube = cube.replace('SIZEXYZ', size_str)
    # Replace modelname
    cube = cube.replace('MODELNAME', str(modelname))

    req = SpawnModelRequest()
    req.model_name = modelname
    req.model_xml = cube
    req.initial_pose.position.x = px
    req.initial_pose.position.y = py
    req.initial_pose.position.z = pz

    q = quaternion_from_euler(rr, rp, ry)
    req.initial_pose.orientation.x = q[0]
    req.initial_pose.orientation.y = q[1]
    req.initial_pose.orientation.z = q[2]
    req.initial_pose.orientation.w = q[3]

    return req
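# Usage sketch (assumes a running Gazebo exposing the standard
# /gazebo/spawn_sdf_model service and the gazebo_msgs package).
import rospy
from gazebo_msgs.srv import SpawnModel

rospy.init_node('spawn_cube_demo')
rospy.wait_for_service('/gazebo/spawn_sdf_model')
spawn_srv = rospy.ServiceProxy('/gazebo/spawn_sdf_model', SpawnModel)
req = create_cube_request('demo_cube',
                          0.5, 0.0, 0.25,   # position x, y, z
                          0.0, 0.0, 0.0,    # roll, pitch, yaw
                          0.5, 0.5, 0.5)    # size x, y, z
resp = spawn_srv(req)
print(resp.success, resp.status_message)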
def start_server(self, host, listen_port, txt_lable):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    print("*" + host + "*")
    print("*" + str(listen_port) + "*")
    s.bind((host, listen_port))  # bind the service IP and port
    s.listen(10)  # start listening; the number is the backlog of pending connections
    while True:
        conn, addr = s.accept()
        print("get request from :", addr)
        data = conn.recv(2048)
        print(data)
        if data.decode('utf-8') == "comtest()":
            conn.sendall("1".encode("utf-8"))
        elif self.response != "":
            conn.sendall(self.response.encode("utf-8"))
        conn.close()  # one connection handles one message
        # UI updates must come after the TCP handling,
        # otherwise they can delay it and cause timeouts.
        txt_lable.insert(1.0, datetime.datetime.now().strftime('%m-%d %H:%M:%S.%f')[:-3]
                         + "-receive data from :" + str(addr) + ": "
                         + data.decode('utf-8') + "\n")
        if self.response != "":
            txt_lable.insert(1.0, datetime.datetime.now().strftime('%m-%d %H:%M:%S.%f')[:-3]
                             + "-send response: " + self.response + "\n")
        txt_lable.update()
def ascii(self):
    """The ascii representation of the detector"""
    comments = ''
    if self.version == 'v3':
        for comment in self.comments:
            if not comment.startswith(' '):
                comment = ' ' + comment
            comments += "#" + comment + "\n"
    if self.version == 'v1':
        header = "{det.det_id} {det.n_doms}".format(det=self)
    else:
        header = "{det.det_id} {det.version}".format(det=self)
        header += "\n{0} {1}".format(self.valid_from, self.valid_until)
        header += "\n" + str(self.utm_info) + "\n"
        header += str(self.n_doms)
    doms = ""
    for dom_id, (line, floor, n_pmts) in self.doms.items():
        doms += "{0} {1} {2} {3}\n".format(dom_id, line, floor, n_pmts)
        for channel_id in range(n_pmts):
            pmt_idx = self._pmt_index_by_omkey[(line, floor, channel_id)]
            pmt = self.pmts[pmt_idx]
            doms += " {0} {1} {2} {3} {4} {5} {6} {7}".format(
                pmt.pmt_id, pmt.pos_x, pmt.pos_y, pmt.pos_z,
                pmt.dir_x, pmt.dir_y, pmt.dir_z, pmt.t0)
            if self.version == 'v3':
                doms += " {0}".format(pmt.status)
            doms += "\n"
    return comments + header + "\n" + doms
def set_channels(request):
    data = eval(str(request.body))
    devices = data['device']
    emb_token = data['token']
    channel = data['channel']
    operation = data['operation']  # add/delete/edit
    if operation == 'edit':
        new_channel = data['new_channel']
    try:
        p12_certificate = P12Certificate.objects.get(emb_token=emb_token)
        devices = APNSDevices.objects.filter(p12_certificate=p12_certificate,
                                             registration_id__in=devices)
        for device in devices:
            if device.channels:
                if operation == 'add':
                    channels = device.channels + channel + '##'
                elif operation == 'delete':
                    channels = device.channels.replace(channel + '##', '')
                elif operation == 'edit':
                    channels = device.channels.replace(channel + '##', new_channel + '##')
                device.channels = channels
            else:
                if operation == 'add':
                    device.channels = channel + '##'
            device.save()
    except Exception as e:
        print(str(e))
def __repr__(self):
    return ", ".join((str(self.__class__.__name__), self.name, self.dir_path,
                      str(self.dir_count), str(self.mov_count)))
async def on_message_edit(self, before, after):
    if not after.guild and not (before.content == after.content or after.author.bot):
        ctx = await self.bot.get_context(after)
        return await self.bot.invoke(ctx)
    logch = self.bot.configs[after.guild.id].get('log.action')
    if after.channel.is_news():
        if before.flags.crossposted != after.flags.crossposted:
            if logch:
                embed = discord.Embed(
                    color=discord.Color.green(),
                    timestamp=after.created_at,
                    description=f'**A message was published in** {after.channel.mention}'
                )
                embed.set_author(name=after.guild.name, icon_url=str(after.guild.icon_url))
                embed.add_field(name='Message Author', value=after.author.mention, inline=False)
                embed.add_field(name='Message', value=f'[Click Here]({after.jump_url})', inline=False)
                embed.set_footer(
                    text=f"Author ID: {after.author.id} | Message ID: {after.id} | Channel ID: {after.channel.id}"
                )
                try:
                    await logch.send(embed=embed)
                except Exception:
                    pass
    if before.content == after.content or after.author.bot:
        return
    message = after
    excluded = self.bot.configs[message.guild.id].get('excluded.filter')
    roleids = [r.id for r in message.author.roles]
    if message.author.id not in excluded and not any(
            r in excluded for r in roleids) and message.channel.id not in excluded:
        filters = self.bot.get_cog('Filters')
        # with suppress(Exception):
        await self.safe_exc(filters.handle_invite, message)
        await self.safe_exc(filters.anti_malware, message)
        await self.safe_exc(filters.handle_paypal, message)
        await self.safe_exc(filters.handle_youtube, message)
        await self.safe_exc(filters.handle_twitch, message)
        await self.safe_exc(filters.handle_twitter, message)
    if logch:
        embed = discord.Embed(
            color=after.author.color,
            timestamp=after.created_at,
            description=f'{after.author.mention} **edited a message in** {after.channel.mention}'
        )
        embed.set_author(name=after.author,
                         icon_url=str(after.author.avatar_url_as(static_format='png', size=2048)))
        bcontent = before.system_content[:300] + (before.system_content[300:] and '...')
        acontent = after.system_content[:300] + (after.system_content[300:] and '...')
        embed.add_field(name='Before', value=bcontent, inline=False)
        embed.add_field(name='After', value=acontent, inline=False)
        embed.set_footer(
            text=f"Author ID: {after.author.id} | Message ID: {after.id} | Channel ID: {after.channel.id}"
        )
        try:
            await logch.send(embed=embed)
        except Exception:
            pass
    ctx = await self.bot.get_context(after)
    await self.bot.invoke(ctx)
def main(config_path):
    """
    Train a DMSAD on the MURA dataset using an AE pretraining.
    """
    # Load config file
    cfg = Config(settings=None)
    cfg.load_config(config_path)

    # Get path to output
    OUTPUT_PATH = cfg.settings['PATH']['OUTPUT'] + cfg.settings['Experiment_Name'] + datetime.today().strftime('%Y_%m_%d_%Hh%M') + '/'
    # make output dirs (the models are saved under 'model/' below, so that
    # is the folder checked and created here)
    if not os.path.isdir(OUTPUT_PATH + 'model/'):
        os.makedirs(OUTPUT_PATH + 'model/', exist_ok=True)
    if not os.path.isdir(OUTPUT_PATH + 'results/'):
        os.makedirs(OUTPUT_PATH + 'results/', exist_ok=True)
    if not os.path.isdir(OUTPUT_PATH + 'logs/'):
        os.makedirs(OUTPUT_PATH + 'logs/', exist_ok=True)

    for seed_i, seed in enumerate(cfg.settings['seeds']):
        ############################### Set Up #################################
        # initialize logger
        logging.basicConfig(level=logging.INFO)
        logger = logging.getLogger()
        try:
            logger.handlers[1].stream.close()
            logger.removeHandler(logger.handlers[1])
        except IndexError:
            pass
        logger.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s')
        log_file = OUTPUT_PATH + 'logs/' + f'log_{seed_i+1}.txt'
        file_handler = logging.FileHandler(log_file)
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

        # print paths
        logger.info(f"Log file : {log_file}")
        logger.info(f"Data path : {cfg.settings['PATH']['DATA']}")
        logger.info(f"Outputs path : {OUTPUT_PATH}" + "\n")

        # Set seed
        if seed != -1:
            random.seed(seed)
            np.random.seed(seed)
            torch.manual_seed(seed)
            torch.cuda.manual_seed(seed)
            torch.cuda.manual_seed_all(seed)
            torch.backends.cudnn.deterministic = True
            logger.info(f"Set seed {seed_i+1:02}/{len(cfg.settings['seeds']):02} to {seed}")

        # set number of threads
        if cfg.settings['n_thread'] > 0:
            torch.set_num_threads(cfg.settings['n_thread'])

        # check if GPU available
        cfg.settings['device'] = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        # Print technical info in logger
        logger.info(f"Device : {cfg.settings['device']}")
        logger.info(f"Number of thread : {cfg.settings['n_thread']}")

        ############################### Split Data #############################
        # Load data information
        df_info = pd.read_csv(cfg.settings['PATH']['DATA_INFO'])
        df_info = df_info.drop(df_info.columns[0], axis=1)
        # remove low contrast images (all black)
        df_info = df_info[df_info.low_contrast == 0]

        # Train Validation Test Split
        spliter = MURA_TrainValidTestSplitter(df_info,
                                              train_frac=cfg.settings['Split']['train_frac'],
                                              ratio_known_normal=cfg.settings['Split']['known_normal'],
                                              ratio_known_abnormal=cfg.settings['Split']['known_abnormal'],
                                              random_state=42)
        spliter.split_data(verbose=False)
        train_df = spliter.get_subset('train')
        valid_df = spliter.get_subset('valid')
        test_df = spliter.get_subset('test')

        # print info to logger
        for key, value in cfg.settings['Split'].items():
            logger.info(f"Split param {key} : {value}")
        logger.info("Split Summary \n" + str(spliter.print_stat(returnTable=True)))

        ############################# Build Model ##############################
        # make networks
        net_AE = AE_net(MLP_Neurons_layer_enc=cfg.settings['AE']['MLP_head_enc'],
                        MLP_Neurons_layer_dec=cfg.settings['AE']['MLP_head_dec'],
                        output_channels=1)
        net_AE = net_AE.to(cfg.settings['device'])
        net_DMSAD = Encoder(MLP_Neurons_layer=cfg.settings['DMSAD']['MLP_head'])
        net_DMSAD = net_DMSAD.to(cfg.settings['device'])
        # print network architectures
        net_architecture = summary_string(net_AE,
                                          (1, cfg.settings['Split']['img_size'], cfg.settings['Split']['img_size']),
                                          batch_size=cfg.settings['AE']['batch_size'],
                                          device=str(cfg.settings['device']))
        logger.info("AE net architecture: \n" + net_architecture + '\n')
        net_architecture = summary_string(net_DMSAD,
                                          (1, cfg.settings['Split']['img_size'], cfg.settings['Split']['img_size']),
                                          batch_size=cfg.settings['DMSAD']['batch_size'],
                                          device=str(cfg.settings['device']))
        logger.info("DMSAD net architecture: \n" + net_architecture + '\n')
        # make model
        ae_DMSAD = AE_DMSAD(net_AE, net_DMSAD, eta=cfg.settings['DMSAD']['eta'],
                            gamma=cfg.settings['DMSAD']['gamma'])

        ############################### Train AE ###############################
        # make datasets
        train_dataset_AD = MURA_Dataset(train_df, data_path=cfg.settings['PATH']['DATA'],
                                        mask_img=True, output_size=cfg.settings['Split']['img_size'])
        valid_dataset_AD = MURA_Dataset(valid_df, data_path=cfg.settings['PATH']['DATA'],
                                        mask_img=True, output_size=cfg.settings['Split']['img_size'])
        test_dataset_AD = MURA_Dataset(test_df, data_path=cfg.settings['PATH']['DATA'],
                                       mask_img=True, output_size=cfg.settings['Split']['img_size'])

        logger.info("Online preprocessing pipeline : \n" + str(train_dataset_AD.transform) + "\n")

        # Load model if required
        if cfg.settings['AE']['model_path_to_load']:
            ae_DMSAD.load_repr_net(cfg.settings['AE']['model_path_to_load'],
                                   map_location=cfg.settings['device'])
            logger.info(f"AE Model Loaded from {cfg.settings['AE']['model_path_to_load']}" + "\n")

        # print train parameters
        for key, value in cfg.settings['AE'].items():
            logger.info(f"AE {key} : {value}")

        # Train AE
        ae_DMSAD.train_AE(train_dataset_AD, valid_dataset=None,
                          n_epoch=cfg.settings['AE']['n_epoch'],
                          batch_size=cfg.settings['AE']['batch_size'],
                          lr=cfg.settings['AE']['lr'],
                          weight_decay=cfg.settings['AE']['weight_decay'],
                          lr_milestone=cfg.settings['AE']['lr_milestone'],
                          n_job_dataloader=cfg.settings['AE']['num_worker'],
                          device=cfg.settings['device'],
                          print_batch_progress=cfg.settings['print_batch_progress'])

        # Evaluate AE to get embeddings
        ae_DMSAD.evaluate_AE(valid_dataset_AD,
                             batch_size=cfg.settings['AE']['batch_size'],
                             n_job_dataloader=cfg.settings['AE']['num_worker'],
                             device=cfg.settings['device'],
                             print_batch_progress=cfg.settings['print_batch_progress'],
                             set='valid')
        ae_DMSAD.evaluate_AE(test_dataset_AD,
                             batch_size=cfg.settings['AE']['batch_size'],
                             n_job_dataloader=cfg.settings['AE']['num_worker'],
                             device=cfg.settings['device'],
                             print_batch_progress=cfg.settings['print_batch_progress'],
                             set='test')

        # save repr net
        ae_DMSAD.save_ae_net(OUTPUT_PATH + f'model/AE_net_{seed_i+1}.pt')
        logger.info("AE model saved at " + OUTPUT_PATH + f"model/AE_net_{seed_i+1}.pt")

        # save results
        ae_DMSAD.save_results(OUTPUT_PATH + f'results/results_{seed_i+1}.json')
        logger.info("Results saved at " + OUTPUT_PATH + f"results/results_{seed_i+1}.json")

        ######################## Transfer Encoder Weight #######################
        ae_DMSAD.transfer_encoder()

        ############################## Train DMSAD #############################
        # Load model if required
        if cfg.settings['DMSAD']['model_path_to_load']:
            ae_DMSAD.load_AD(cfg.settings['DMSAD']['model_path_to_load'],
                             map_location=cfg.settings['device'])
            logger.info(f"DMSAD Model Loaded from {cfg.settings['DMSAD']['model_path_to_load']} \n")

        # print train parameters
        for key, value in cfg.settings['DMSAD'].items():
            logger.info(f"DMSAD {key} : {value}")

        # Train DMSAD
        ae_DMSAD.train_AD(train_dataset_AD, valid_dataset=valid_dataset_AD,
                          n_sphere_init=cfg.settings['DMSAD']['n_sphere_init'],
                          n_epoch=cfg.settings['DMSAD']['n_epoch'],
                          batch_size=cfg.settings['DMSAD']['batch_size'],
                          lr=cfg.settings['DMSAD']['lr'],
                          weight_decay=cfg.settings['DMSAD']['weight_decay'],
                          lr_milestone=cfg.settings['DMSAD']['lr_milestone'],
                          n_job_dataloader=cfg.settings['DMSAD']['num_worker'],
                          device=cfg.settings['device'],
                          print_batch_progress=cfg.settings['print_batch_progress'])

        logger.info('--- Validation')
        ae_DMSAD.evaluate_AD(valid_dataset_AD,
                             batch_size=cfg.settings['DMSAD']['batch_size'],
                             n_job_dataloader=cfg.settings['DMSAD']['num_worker'],
                             device=cfg.settings['device'],
                             print_batch_progress=cfg.settings['print_batch_progress'],
                             set='valid')
        logger.info('--- Test')
        ae_DMSAD.evaluate_AD(test_dataset_AD,
                             batch_size=cfg.settings['DMSAD']['batch_size'],
                             n_job_dataloader=cfg.settings['DMSAD']['num_worker'],
                             device=cfg.settings['device'],
                             print_batch_progress=cfg.settings['print_batch_progress'],
                             set='test')

        # save DMSAD
        ae_DMSAD.save_AD(OUTPUT_PATH + f'model/DMSAD_{seed_i+1}.pt')
        logger.info("model saved at " + OUTPUT_PATH + f"model/DMSAD_{seed_i+1}.pt")

        ########################## Save Results ################################
        # save results
        ae_DMSAD.save_results(OUTPUT_PATH + f'results/results_{seed_i+1}.json')
        logger.info("Results saved at " + OUTPUT_PATH + f"results/results_{seed_i+1}.json")

        # save config file
        cfg.settings['device'] = str(cfg.settings['device'])
        cfg.save_config(OUTPUT_PATH + 'config.json')
        logger.info("Config saved at " + OUTPUT_PATH + "config.json")
print(sentence)


def anagram_gen(words):
    temp = []
    word = list(words)
    # print(word)
    while len(word) > 0:
        rand_index = random.randint(0, len(word) - 1)
        temp.append(word[rand_index])
        word.remove(word[rand_index])
    print("".join(temp))


if __name__ == '__main__':
    # this is for reverse str
    words = str(sys.argv[1:]).replace("[", "").replace("]", "")
    # print(words)
    # uncomment this if wanna do the basic challenge for #1
    # words = sys.argv[1:]
    # print(words)
    # print(random_python(words))
    # print(random_python("hey class whats up".split()))
    print(reverse_word(words))
    # this is for mad libs
    # words = sys.argv[1:][0]
    # mad_libs(words)
    # anagram_gen(words)
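# A shorter equivalent sketch for anagram_gen: random.sample returns the
# letters in random order without mutating a working list.
import random

def anagram_gen_short(words):
    print(''.join(random.sample(words, len(words))))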
def __str__(self):
    return 'Group: {g}\n{st}\n'.format(
        g=self.__name,
        st='\n'.join(str(s) for s in self.__students))
def run(self, workspace):
    objects = workspace.object_set.get_objects(self.object_name.value)
    dimensions = len(objects.shape)
    assert isinstance(objects, cpo.Objects)
    has_pixels = objects.areas > 0
    labels = objects.small_removed_segmented
    kept_labels = objects.segmented
    neighbor_objects = workspace.object_set.get_objects(
        self.neighbors_name.value)
    assert isinstance(neighbor_objects, cpo.Objects)
    neighbor_labels = neighbor_objects.small_removed_segmented
    #
    # Need to add in labels touching border.
    #
    unedited_segmented = neighbor_objects.unedited_segmented
    touching_border = np.zeros(np.max(unedited_segmented) + 1, bool)
    touching_border[unedited_segmented[0, :]] = True
    touching_border[unedited_segmented[-1, :]] = True
    touching_border[unedited_segmented[:, 0]] = True
    touching_border[unedited_segmented[:, -1]] = True
    touching_border[0] = False
    touching_border_mask = touching_border[unedited_segmented]
    nobjects = np.max(labels)
    nkept_objects = objects.count
    nneighbors = np.max(neighbor_labels)
    if np.any(touching_border) and \
            np.all(~ touching_border_mask[neighbor_labels != 0]):
        # Add the border labels if any were excluded
        touching_border_object_number = np.cumsum(touching_border) + \
            np.max(neighbor_labels)
        touching_border_mask = touching_border_mask & (neighbor_labels == 0)
        neighbor_labels = neighbor_labels.copy().astype(np.int32)
        neighbor_labels[touching_border_mask] = touching_border_object_number[
            unedited_segmented[touching_border_mask]]

    _, object_numbers = objects.relate_labels(labels, kept_labels)
    if self.neighbors_are_objects:
        neighbor_numbers = object_numbers
        neighbor_has_pixels = has_pixels
    else:
        _, neighbor_numbers = neighbor_objects.relate_labels(
            neighbor_labels, neighbor_objects.segmented)
        neighbor_has_pixels = np.bincount(neighbor_labels.ravel())[1:] > 0
    neighbor_count = np.zeros((nobjects,))
    pixel_count = np.zeros((nobjects,))
    first_object_number = np.zeros((nobjects,), int)
    second_object_number = np.zeros((nobjects,), int)
    first_x_vector = np.zeros((nobjects,))
    second_x_vector = np.zeros((nobjects,))
    first_y_vector = np.zeros((nobjects,))
    second_y_vector = np.zeros((nobjects,))
    angle = np.zeros((nobjects,))
    percent_touching = np.zeros((nobjects,))
    expanded_labels = None
    if self.distance_method == D_EXPAND:
        # Find the i,j coordinates of the nearest foreground point
        # to every background point
        if dimensions == 2:
            i, j = scind.distance_transform_edt(labels == 0,
                                                return_distances=False,
                                                return_indices=True)
            # Assign each background pixel to the label of its nearest
            # foreground pixel. Assign label to label for foreground.
            labels = labels[i, j]
        else:
            k, i, j = scind.distance_transform_edt(labels == 0,
                                                   return_distances=False,
                                                   return_indices=True)
            labels = labels[k, i, j]
        expanded_labels = labels  # for display
        distance = 1  # dilate once to make touching edges overlap
        scale = S_EXPANDED
        if self.neighbors_are_objects:
            neighbor_labels = labels.copy()
    elif self.distance_method == D_WITHIN:
        distance = self.distance.value
        scale = str(distance)
    elif self.distance_method == D_ADJACENT:
        distance = 1
        scale = S_ADJACENT
    else:
        raise ValueError("Unknown distance method: %s" %
                         self.distance_method.value)
    if nneighbors > (1 if self.neighbors_are_objects else 0):
        first_objects = []
        second_objects = []
        object_indexes = np.arange(nobjects, dtype=np.int32) + 1
        #
        # First, compute the first and second nearest neighbors,
        # and the angles between self and the first and second
        # nearest neighbors
        #
        ocenters = centers_of_labels(
            objects.small_removed_segmented).transpose()
        ncenters = centers_of_labels(
            neighbor_objects.small_removed_segmented).transpose()
        areas = fix(scind.sum(np.ones(labels.shape), labels, object_indexes))
        perimeter_outlines = outline(labels)
        perimeters = fix(scind.sum(
            np.ones(labels.shape), perimeter_outlines, object_indexes))

        i, j = np.mgrid[0:nobjects, 0:nneighbors]
        distance_matrix = np.sqrt((ocenters[i, 0] - ncenters[j, 0]) ** 2 +
                                  (ocenters[i, 1] - ncenters[j, 1]) ** 2)
        #
        # order[:,0] should be arange(nobjects)
        # order[:,1] should be the nearest neighbor
        # order[:,2] should be the next nearest neighbor
        #
        if distance_matrix.shape[1] == 1:
            # a little buggy, lexsort assumes that a 2-d array of
            # second dimension = 1 is a 1-d array
            order = np.zeros(distance_matrix.shape, int)
        else:
            order = np.lexsort([distance_matrix])
        first_neighbor = 1 if self.neighbors_are_objects else 0
        first_object_index = order[:, first_neighbor]
        first_x_vector = ncenters[first_object_index, 1] - ocenters[:, 1]
        first_y_vector = ncenters[first_object_index, 0] - ocenters[:, 0]
        if nneighbors > first_neighbor + 1:
            second_object_index = order[:, first_neighbor + 1]
            second_x_vector = ncenters[second_object_index, 1] - ocenters[:, 1]
            second_y_vector = ncenters[second_object_index, 0] - ocenters[:, 0]
            v1 = np.array((first_x_vector, first_y_vector))
            v2 = np.array((second_x_vector, second_y_vector))
            #
            # Project the unit vector v1 against the unit vector v2
            #
            dot = (np.sum(v1 * v2, 0) /
                   np.sqrt(np.sum(v1 ** 2, 0) * np.sum(v2 ** 2, 0)))
            angle = np.arccos(dot) * 180.0 / np.pi

        # Make the structuring element for dilation
        if dimensions == 2:
            strel = strel_disk(distance)
        else:
            strel = skimage.morphology.ball(distance)
        #
        # A little bigger one to enter into the border with a structure
        # that mimics the one used to create the outline
        #
        if dimensions == 2:
            strel_touching = strel_disk(distance + .5)
        else:
            strel_touching = skimage.morphology.ball(distance + 0.5)
        #
        # Get the extents for each object and calculate the patch
        # that excises the part of the image that is "distance"
        # away
        if dimensions == 2:
            i, j = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]
            min_i, max_i, min_i_pos, max_i_pos = \
                scind.extrema(i, labels, object_indexes)
            min_j, max_j, min_j_pos, max_j_pos = \
                scind.extrema(j, labels, object_indexes)
            min_i = np.maximum(fix(min_i) - distance, 0).astype(int)
            max_i = np.minimum(fix(max_i) + distance + 1,
                               labels.shape[0]).astype(int)
            min_j = np.maximum(fix(min_j) - distance, 0).astype(int)
            max_j = np.minimum(fix(max_j) + distance + 1,
                               labels.shape[1]).astype(int)
        else:
            k, i, j = np.mgrid[0:labels.shape[0], 0:labels.shape[1],
                               0:labels.shape[2]]
            min_k, max_k, min_k_pos, max_k_pos = scind.extrema(
                k, labels, object_indexes)
            min_i, max_i, min_i_pos, max_i_pos = scind.extrema(
                i, labels, object_indexes)
            min_j, max_j, min_j_pos, max_j_pos = scind.extrema(
                j, labels, object_indexes)
            min_k = np.maximum(fix(min_k) - distance, 0).astype(int)
            max_k = np.minimum(fix(max_k) + distance + 1,
                               labels.shape[0]).astype(int)
            min_i = np.maximum(fix(min_i) - distance, 0).astype(int)
            max_i = np.minimum(fix(max_i) + distance + 1,
                               labels.shape[1]).astype(int)
            min_j = np.maximum(fix(min_j) - distance, 0).astype(int)
            max_j = np.minimum(fix(max_j) + distance + 1,
                               labels.shape[2]).astype(int)
        #
        # Loop over all objects
        # Calculate which ones overlap "index"
        # Calculate how much overlap there is of others to "index"
        #
        for object_number in object_numbers:
            if object_number == 0:
                #
                # No corresponding object in small-removed. This means
                # that the object has no pixels, e.g. not renumbered.
                #
                continue
            index = object_number - 1
            if dimensions == 2:
                patch = labels[min_i[index]:max_i[index],
                               min_j[index]:max_j[index]]
                npatch = neighbor_labels[min_i[index]:max_i[index],
                                         min_j[index]:max_j[index]]
            else:
                patch = labels[min_k[index]:max_k[index],
                               min_i[index]:max_i[index],
                               min_j[index]:max_j[index]]
                npatch = neighbor_labels[min_k[index]:max_k[index],
                                         min_i[index]:max_i[index],
                                         min_j[index]:max_j[index]]
            #
            # Find the neighbors
            #
            patch_mask = patch == (index + 1)
            extended = scind.binary_dilation(patch_mask, strel)
            neighbors = np.unique(npatch[extended])
            neighbors = neighbors[neighbors != 0]
            if self.neighbors_are_objects:
                neighbors = neighbors[neighbors != object_number]
            nc = len(neighbors)
            neighbor_count[index] = nc
            if nc > 0:
                first_objects.append(np.ones(nc, int) * object_number)
                second_objects.append(neighbors)
            if self.neighbors_are_objects:
                #
                # Find the # of overlapping pixels. Dilate the neighbors
                # and see how many pixels overlap our image. Use a 3x3
                # structuring element to expand the overlapping edge
                # into the perimeter.
                #
                if dimensions == 2:
                    outline_patch = perimeter_outlines[
                        min_i[index]:max_i[index],
                        min_j[index]:max_j[index]] == object_number
                else:
                    outline_patch = perimeter_outlines[
                        min_k[index]:max_k[index],
                        min_i[index]:max_i[index],
                        min_j[index]:max_j[index]] == object_number
                extended = scind.binary_dilation(
                    (patch != 0) & (patch != object_number), strel_touching)
                overlap = np.sum(outline_patch & extended)
                pixel_count[index] = overlap
        if sum([len(x) for x in first_objects]) > 0:
            first_objects = np.hstack(first_objects)
            reverse_object_numbers = np.zeros(
                max(np.max(object_numbers), np.max(first_objects)) + 1, int)
            reverse_object_numbers[object_numbers] = \
                np.arange(len(object_numbers)) + 1
            first_objects = reverse_object_numbers[first_objects]

            second_objects = np.hstack(second_objects)
            reverse_neighbor_numbers = np.zeros(
                max(np.max(neighbor_numbers), np.max(second_objects)) + 1, int)
            reverse_neighbor_numbers[neighbor_numbers] = \
                np.arange(len(neighbor_numbers)) + 1
            second_objects = reverse_neighbor_numbers[second_objects]
            to_keep = (first_objects > 0) & (second_objects > 0)
            first_objects = first_objects[to_keep]
            second_objects = second_objects[to_keep]
        else:
            first_objects = np.zeros(0, int)
            second_objects = np.zeros(0, int)
        if self.neighbors_are_objects:
            percent_touching = pixel_count * 100 / perimeters
        else:
            percent_touching = pixel_count * 100.0 / areas
        object_indexes = object_numbers - 1
        neighbor_indexes = neighbor_numbers - 1
        #
        # Have to recompute nearest
        #
        first_object_number = np.zeros(nkept_objects, int)
        second_object_number = np.zeros(nkept_objects, int)
        if nkept_objects > (1 if self.neighbors_are_objects else 0):
            di = (ocenters[object_indexes[:, np.newaxis], 0] -
                  ncenters[neighbor_indexes[np.newaxis, :], 0])
            dj = (ocenters[object_indexes[:, np.newaxis], 1] -
                  ncenters[neighbor_indexes[np.newaxis, :], 1])
            distance_matrix = np.sqrt(di * di + dj * dj)
            distance_matrix[~ has_pixels, :] = np.inf
            distance_matrix[:, ~neighbor_has_pixels] = np.inf
            #
            # order[:,0] should be arange(nobjects)
            # order[:,1] should be the nearest neighbor
            # order[:,2] should be the next nearest neighbor
            #
            order = np.lexsort([distance_matrix]).astype(
                first_object_number.dtype)
            if self.neighbors_are_objects:
                first_object_number[has_pixels] = order[has_pixels, 1] + 1
                if nkept_objects > 2:
                    second_object_number[has_pixels] = order[has_pixels, 2] + 1
            else:
                first_object_number[has_pixels] = order[has_pixels, 0] + 1
                if order.shape[1] > 1:
                    second_object_number[has_pixels] = order[has_pixels, 1] + 1
    else:
        object_indexes = object_numbers - 1
        neighbor_indexes = neighbor_numbers - 1
        first_objects = np.zeros(0, int)
        second_objects = np.zeros(0, int)
    #
    # Now convert all measurements from the small-removed to
    # the final number set.
    #
    neighbor_count = neighbor_count[object_indexes]
    neighbor_count[~ has_pixels] = 0
    percent_touching = percent_touching[object_indexes]
    percent_touching[~ has_pixels] = 0
    first_x_vector = first_x_vector[object_indexes]
    second_x_vector = second_x_vector[object_indexes]
    first_y_vector = first_y_vector[object_indexes]
    second_y_vector = second_y_vector[object_indexes]
    angle = angle[object_indexes]
    #
    # Record the measurements
    #
    assert (isinstance(workspace, cpw.Workspace))
    m = workspace.measurements
    assert (isinstance(m, cpmeas.Measurements))
    image_set = workspace.image_set
    features_and_data = [
        (M_NUMBER_OF_NEIGHBORS, neighbor_count),
        (M_FIRST_CLOSEST_OBJECT_NUMBER, first_object_number),
        (M_FIRST_CLOSEST_DISTANCE,
         np.sqrt(first_x_vector ** 2 + first_y_vector ** 2)),
        (M_SECOND_CLOSEST_OBJECT_NUMBER, second_object_number),
        (M_SECOND_CLOSEST_DISTANCE,
         np.sqrt(second_x_vector ** 2 + second_y_vector ** 2)),
        (M_ANGLE_BETWEEN_NEIGHBORS, angle)]
    if self.neighbors_are_objects:
        features_and_data.append((M_PERCENT_TOUCHING, percent_touching))
    for feature_name, data in features_and_data:
        m.add_measurement(self.object_name.value,
                          self.get_measurement_name(feature_name),
                          data)
    if len(first_objects) > 0:
        m.add_relate_measurement(
            self.module_num, cpmeas.NEIGHBORS,
            self.object_name.value,
            self.object_name.value if self.neighbors_are_objects
            else self.neighbors_name.value,
            m.image_set_number * np.ones(first_objects.shape, int),
            first_objects,
            m.image_set_number * np.ones(second_objects.shape, int),
            second_objects)

    labels = kept_labels

    neighbor_count_image = np.zeros(labels.shape, int)
    object_mask = objects.segmented != 0
    object_indexes = objects.segmented[object_mask] - 1
    neighbor_count_image[object_mask] = neighbor_count[object_indexes]
    workspace.display_data.neighbor_count_image = neighbor_count_image

    if self.neighbors_are_objects:
        percent_touching_image = np.zeros(labels.shape)
        percent_touching_image[object_mask] = percent_touching[object_indexes]
        workspace.display_data.percent_touching_image = percent_touching_image

    image_set = workspace.image_set
    if self.wants_count_image.value:
        neighbor_cm_name = self.count_colormap.value
        neighbor_cm = get_colormap(neighbor_cm_name)
        sm = matplotlib.cm.ScalarMappable(cmap=neighbor_cm)
        img = sm.to_rgba(neighbor_count_image)[:, :, :3]
        img[:, :, 0][~ object_mask] = 0
        img[:, :, 1][~ object_mask] = 0
        img[:, :, 2][~ object_mask] = 0
        count_image = cpi.Image(img, masking_objects=objects)
        image_set.add(self.count_image_name.value, count_image)
    else:
        neighbor_cm_name = cpprefs.get_default_colormap()
        neighbor_cm = matplotlib.cm.get_cmap(neighbor_cm_name)
    if self.neighbors_are_objects and self.wants_percent_touching_image:
        percent_touching_cm_name = self.touching_colormap.value
        percent_touching_cm = get_colormap(percent_touching_cm_name)
        sm = matplotlib.cm.ScalarMappable(cmap=percent_touching_cm)
        img = sm.to_rgba(percent_touching_image)[:, :, :3]
        img[:, :, 0][~ object_mask] = 0
        img[:, :, 1][~ object_mask] = 0
        img[:, :, 2][~ object_mask] = 0
        touching_image = cpi.Image(img, masking_objects=objects)
        image_set.add(self.touching_image_name.value, touching_image)
    else:
        percent_touching_cm_name = cpprefs.get_default_colormap()
        percent_touching_cm = matplotlib.cm.get_cmap(percent_touching_cm_name)
    if self.show_window:
        workspace.display_data.neighbor_cm_name = neighbor_cm_name
        workspace.display_data.percent_touching_cm_name = percent_touching_cm_name
        workspace.display_data.orig_labels = objects.segmented
        workspace.display_data.expanded_labels = expanded_labels
        workspace.display_data.object_mask = object_mask
        workspace.display_data.dimensions = dimensions
import pandas as pd

username = '******'
conduit = pd.read_excel('conduit.xlsx')
mes = 'mes'  # assumed name of the attendance column in conduit.xlsx; the
             # original referenced an undefined variable `mes`
fail = 0
for i in range(0, len(conduit[mes])):
    if conduit[mes][i] == 0:
        fail += 1
print(fail)
print('Statistics: \n' + 'Number of absences: ' + str(fail))
class Player:
    run = [pygame.image.load(os.path.join('data', 'images', str(x) + '.png'))
           for x in range(8, 16)]
    jump = [pygame.image.load(os.path.join('data', 'images', str(x) + '.png'))
            for x in range(1, 8)]
    slide = ([pygame.image.load(os.path.join('data', 'images', 'S1.png'))]
             + [pygame.image.load(os.path.join('data', 'images', 'S2.png'))] * 7
             + [pygame.image.load(os.path.join('data', 'images', 'S3.png')),
                pygame.image.load(os.path.join('data', 'images', 'S4.png')),
                pygame.image.load(os.path.join('data', 'images', 'S5.png'))])
    fall = pygame.image.load(os.path.join('data', 'images', '0.png'))
    # Per-frame vertical jump speeds (109 frames: rise, hang, fall)
    jump_list = ([1] * 6 + [2] * 12 + [3] * 12 + [4] * 12 + [0] * 25
                 + [-1] * 6 + [-2] * 12 + [-3] * 12 + [-4] * 12)

    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.jumping = False  # set from player input; tells draw() what the character should do
        self.sliding = False  # set from player input; tells draw() what the character should do
        self.slide_count = 0  # frame counter for the slide animation
        self.jump_count = 0   # frame counter for the jump animation
        self.run_count = 0    # frame counter for the run animation
        self.slide_up = False
        self.game_over = False

    def draw(self, win):
        if self.game_over:
            win.blit(self.fall, (self.x, self.y + 30))
        elif self.jumping:
            self.y -= self.jump_list[self.jump_count] * 1.2  # y position during the jump
            win.blit(self.jump[self.jump_count // 18], (self.x, self.y))  # each jump image is shown for 18 frames
            self.jump_count += 1  # advance the frame counter
            if self.jump_count > 108:  # a counter above 108 means the jump is finished
                self.jump_count = 0
                self.jumping = False
                self.run_count = 0
            self.hitbox = (self.x + 4, self.y, self.width - 24, self.height - 10)  # hitbox during the jump
        elif self.sliding or self.slide_up:
            if self.slide_count < 10:
                self.y += 2
            elif self.slide_count == 80:
                self.y -= 18
                self.sliding = False
                self.slide_up = True
            elif self.slide_count > 10 and self.slide_count < 80:
                self.hitbox = (self.x, self.y + 3, self.width - 8, self.height - 35)  # lower the hitbox during the slide
            if self.slide_count >= 90:  # the slide is finished
                self.slide_count = 0
                self.slide_up = False
                self.run_count = 0
                self.hitbox = (self.x + 4, self.y, self.width - 24, self.height - 10)  # restore the hitbox
            win.blit(self.slide[self.slide_count // 10], (self.x, self.y))
            self.slide_count += 1  # advance the counter
        else:
            if self.run_count > 42:
                self.run_count = 0
            win.blit(self.run[self.run_count // 6], (self.x, self.y))  # each run image is shown for 6 frames
            self.run_count += 1  # advance the counter
            self.hitbox = (self.x + 4, self.y - 2, self.width - 24, self.height - 13)
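# Design note (hypothetical alternative): jump_list hand-encodes a per-frame
# vertical speed. The same arc can be computed from a parabola, removing the
# table; the constants below are illustrative, with `height` chosen near what
# the table yields ((6*1 + 12*2 + 12*3 + 12*4) * 1.2 = 136.8 pixels).
def jump_offset(frame, total=108, height=137):
    # Height above the ground: 0 at frame 0 and at `total`, peaking midway.
    t = frame / total
    return 4 * height * t * (1 - t)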
def __repr__(self):
    return str(self.f) + "/" + str(self.letter)
def generate_xlsx_report(self, workbook, data, lines):
    worksheet = workbook.add_worksheet("INVOICE REPORT 1")
    format1 = workbook.add_format({
        'font_size': 14,
        'bottom': True, 'right': True, 'left': True, 'top': True,
        'align': 'center',
        'bold': True,
    })
    format12 = workbook.add_format({
        'font_size': 16,
        'align': 'center',
        'right': True, 'left': True, 'bottom': False, 'top': True,
        'bold': True,
    })
    format3 = workbook.add_format({
        'bold': True,
        'bottom': True, 'top': True,
        'font_size': 12,
        'align': 'center',
    })
    font_size_8 = workbook.add_format({
        'bottom': True, 'top': True, 'right': True, 'left': True,
        'font_size': 8,
    })
    justify = workbook.add_format({
        'bottom': True, 'top': True, 'right': True, 'left': True,
        'font_size': 12,
        'bold': True,
        'align': 'center',
    })
    format3.set_align('center')
    font_size_8.set_align('center')
    justify.set_align('justify')
    format1.set_align('center')
    boldc = workbook.add_format({'bold': True, 'align': 'center'})
    center = workbook.add_format({'align': 'center'})

    filter_row = 3
    filter_row1 = 5
    if data['form']['date_from']:
        date_from = datetime.strptime(data['form']['date_from'], '%Y-%m-%d').date()
        worksheet.write('A%s' % filter_row, 'Date From', boldc)
        worksheet.write('B%s' % filter_row, str(date_from.strftime("%d-%m-%Y")), boldc)
    if data['form']['date_to']:
        date_to = datetime.strptime(data['form']['date_to'], '%Y-%m-%d').date()
        worksheet.write('D%s' % filter_row, 'Date To', boldc)
        worksheet.write('E%s' % filter_row, str(date_to.strftime("%d-%m-%Y")), boldc)

    row = 6
    new_row = row + 1
    worksheet.set_column('A:A', 20)
    worksheet.set_column('B:B', 25)
    worksheet.set_column('C:C', 20)
    worksheet.set_column('D:D', 20)
    worksheet.set_column('E:E', 25)
    worksheet.set_column('F:F', 20)
    worksheet.merge_range('A1:E1', 'INVOICING REPORT', format3)
    worksheet.write('A%s' % row, 'SL NO', format3)
    worksheet.write('B%s' % row, 'SALES PERSON', format3)
    worksheet.write('C%s' % row, 'CUSTOMER NAME', format3)
    worksheet.write('D%s' % row, 'INVOICE DATE', format3)
    worksheet.write('E%s' % row, 'COMPANY NAME', format3)
    worksheet.write('F%s' % row, 'TOTAL', format3)
    # worksheet.write('G%s' % row, 'quantity', format3)

    domain = []
    first = data['form']['date_from']
    last = data['form']['date_to']
    self.env.cr.execute(
        'select am.name as id,pt.name as customer_name,am.invoice_date,am.invoice_date_due,'
        'am.amount_untaxed_signed,am.amount_total_signed,am.amount_residual_signed,'
        'am.state,am.invoice_user_id,rp.name as name,co.name as company '
        'from account_move am '
        'join res_users ru on ru.id = am.invoice_user_id '
        'join res_partner rp on ru.partner_id = rp.id '
        'join res_partner pt on pt.id = am.partner_id '
        'join res_company co on co.partner_id = am.company_id '
        'where invoice_date between %s and %s ',
        (str(first), str(last)))
    values = self.env.cr.dictfetchall()
    sl_no = 1
    # record = self.env['account.move'].search(domain)
    val = []
    for each in values:
        worksheet.write('A%s' % new_row, sl_no, center)
        worksheet.write('B%s' % new_row, each['name'])
        worksheet.write('C%s' % new_row, each['customer_name'])
        worksheet.write('D%s' % new_row, each['invoice_date'].strftime("%d-%m-%Y"), center)
        worksheet.write('E%s' % new_row, each['company'])
        worksheet.write('F%s' % new_row, each['amount_total_signed'])
        # Per-invoice detail rows, kept from the original but disabled:
        # worksheet.write('B%s' % new_row, 'quantity', center)
        # worksheet.write('C%s' % new_row, 'product', center)
        # worksheet.write('D%s' % new_row, 'price', center)
        # for k in each.invoice_line_ids:
        #     new_row = new_row + 1
        #     worksheet.write('B%s' % new_row, k.quantity)
        #     worksheet.write('C%s' % new_row, k.name)
        #     worksheet.write('D%s' % new_row, k.price_unit)
        new_row += 1
        sl_no += 1
def callback_get(bot, update):
    print("callback")
    print(update.callback_query.data)
    callquery.append(update.callback_query.data)
    if update.callback_query.data == '맛집 추천받기':  # "get a restaurant recommendation"
        bot.sendMessage(chat_id=chat_id,
                        text=emojize("안녕! 나는 머먹찌야:hamster:" + "\n"
                                     + "맛집을 추천해줄게!" + "\n"
                                     + "어디서 먹을건지 지역을 적어줘!", use_aliases=True))
    if update.callback_query.data in ("강남구 신사동", "관악구 신사동", "은평구 신사동"):
        Gu = update.callback_query.data.split()[0]
        Dong = update.callback_query.data.split()[1]
        df1, gu = get_tablebylocation(Dong)
        gu_list.append(Gu)
        addrs.append(update.callback_query.data)
        bot.send_message(chat_id=chat_id,
                         text=update.callback_query.data + '에서 먹을거구나!' + '\n'
                              + update.callback_query.data + '에서 먹고 싶은 메뉴가 있어?' + '\n'
                              + '있으면 메뉴를 적어줘!' + '\n'
                              + '없으면 "없어"라고 적어줘!',
                         parse_mode="Markdown")
        print(addrs)
        print(gu_list)
    if update.callback_query.data == "더보기":  # "show more"
        more_num.append(1)
        df1, gu = get_tablebylocation(addrs[0])
        df2 = get_tablebymenu(menus[0], df1)
        df2 = df2.values.tolist()
        # Group df2 into chunks of five entries each.
        userlist = [2, 3, 6, 7, 8, 9, 10, 14, 15, 16, 17]
        df3 = tuple(divide_list(df2, 5))
        # len(more_num) counts how many times "more" was pressed, so it
        # indexes the next chunk.
        index = len(more_num)
        df3 = df3[index]
        for i in range(len(df3)):
            txt = ''
            for j in userlist:
                txt = txt + str(df3[i][j]) + '\n'
            bot.send_message(chat_id=chat_id, text=txt)
        print(len(more_num))
        menus.clear()
        addrs.clear()
        gu_list.clear()
        bot.send_message(chat_id=chat_id, text='내가 준비한건 여기까지!')
        bot.sendMessage(chat_id=chat_id,
                        text=emojize("다시 안녕! 나는 머먹찌야:hamster:" + "\n"
                                     + "맛집을 추천해줄게!" + "\n"
                                     + "어디서 먹을건지 지역을 적어줘!" + "\n"
                                     + "예)을지로", use_aliases=True))
    if update.callback_query.data == "이제 됐어":  # "that's enough"
        bot.sendMessage(chat_id=chat_id,
                        text=emojize("다시 안녕! 나는 머먹찌야:hamster:" + "\n"
                                     + "맛집을 추천해줄게!" + "\n"
                                     + "어디서 먹을건지 지역을 적어줘!" + "\n"
                                     + "예)을지로", use_aliases=True))
    if update.callback_query.data not in ("없어", "더보기", "이제 됐어", "강남구 신사동",
                                          "관악구 신사동", "은평구 신사동", "맛집 추천받기",
                                          "머먹찌랑 대화하기"):
        bot.edit_message_text(text=update.callback_query.data + " 좋은 선택이야!",
                              chat_id=update.callback_query.message.chat_id,
                              message_id=update.callback_query.message.message_id)
        menus.append(update.callback_query.data)
        df1, gu = get_tablebylocation(addrs[0])
        df2 = get_tablebymenu(menus[0], df1)
        df2 = df2.values.tolist()
        # Group df2 into chunks of five entries each.
        userlist = [2, 3, 6, 7, 8, 9, 10, 14, 15, 16, 17]
        if len(df2) == 0:
            bot.send_message(chat_id=chat_id,
                             text='미안.. 아직 내 데이터상에는' + '\n' + ' 이 지역에 '
                                  + update.callback_query.data + ' 맛집은 없어ㅜ.ㅜ' + '\n'
                                  + '다시 처음부터 적어줘!')
            menus.clear()
            addrs.clear()
            gu_list.clear()
            # Back to the start.
            bot.sendMessage(chat_id=chat_id,
                            text=emojize("다시 안녕! 나는 머먹찌야:hamster:" + "\n"
                                         + "맛집을 추천해줄게!" + "\n"
                                         + "어디서 먹을건지 지역을 적어줘!" + "\n"
                                         + "예)을지로", use_aliases=True))
        else:
            df3 = tuple(divide_list(df2, 5))
            df3 = df3[0]
            for i in range(len(df3)):
                txt = ''
                for j in userlist:
                    txt = txt + str(df3[i][j]) + '\n'
                bot.send_message(chat_id=chat_id, text=txt)
            menus.clear()
            addrs.clear()
            gu_list.clear()
            bot.send_message(chat_id=chat_id, text='내가 준비한건 여기까지!')
            bot.sendMessage(chat_id=chat_id,
                            text=emojize("다시 안녕! 나는 머먹찌야:hamster:" + "\n"
                                         + "맛집을 추천해줄게!" + "\n"
                                         + "어디서 먹을건지 지역을 적어줘!" + "\n"
                                         + "예)을지로", use_aliases=True))
    if update.callback_query.data == "없어":  # "no (menu in mind)"
        bot.edit_message_text(
            text=emojize("그렇다면 너의 *현재 기분*에따라 메뉴를 추천해줄게" + "\n"
                         + "지금 기분이 어때?:hamster:", use_aliases=True),
            chat_id=update.callback_query.message.chat_id,
            message_id=update.callback_query.message.message_id,
            parse_mode="Markdown")
    if update.callback_query.data == '머먹찌랑 대화하기':  # "chat with the bot"
        bot.edit_message_text(
            text=emojize("그래! 나랑 얘기하고 놀자!:hamster:", use_aliases=True),
            chat_id=update.callback_query.message.chat_id,
            message_id=update.callback_query.message.message_id)
def __repr__(self):
    return str(self.heaparr[1:self.l + 1])
for json_file in glob.glob("experiment_*/*.json"): with open(json_file, "r") as infile: json_data = json.load(infile) data.append( (json_data['params']['dctcpG'], float(json_data['results']['averageFCT'].split()[0]) * 1000)) fct = [] tup = [] for point in sorted(data): fct.append(point[1]) tup.append(point[0]) N = len(fct) ind = np.arange(N) width = 0.35 fig, ax = plt.subplots() ax.bar(ind, fct, width, color="red") labels = [str(x) for x in tup] ax.set_xticks(ind + width / 2) ax.set_xticklabels(labels) plt.xlabel("dctcp_shift_g") plt.ylabel("Average FCT (ms)") plt.grid() plt.savefig("sens_shift_g.pdf", format='pdf', dpi=1200) plt.show()
def _loads_cpp_info(text):
    # "-" is placed last in the character class so it matches a literal
    # hyphen; the original "[a-zA-Z0-9_:-\\.]" created an accidental
    # character range from ":" to "\".
    pattern = re.compile(r"^\[([a-zA-Z0-9_:\\.-]+)\]([^\[]+)", re.MULTILINE)
    result = DepsCppInfo()
    try:
        for m in pattern.finditer(text):
            var_name = m.group(1)
            lines = []
            for line in m.group(2).splitlines():
                line = line.strip()
                if not line or line[0] == "#":
                    continue
                lines.append(line)
            if not lines:
                continue
            tokens = var_name.split(":")
            if len(tokens) == 2:  # has config
                var_name, config = tokens
            else:
                config = None
            tokens = var_name.split("_", 1)
            field = tokens[0]
            if len(tokens) == 2:
                dep = tokens[1]
                dep_cpp_info = result._dependencies.setdefault(dep, CppInfo(root_folder=""))
                if field == "rootpath":
                    lines = lines[0]
                item_to_apply = dep_cpp_info
            else:
                item_to_apply = result
            if config:
                config_deps = getattr(item_to_apply, config)
                setattr(config_deps, field, lines)
            else:
                setattr(item_to_apply, field, lines)
    except Exception as e:
        logger.error(traceback.format_exc())
        raise ConanException("There was an error parsing conanbuildinfo.txt: %s" % str(e))
    return result
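# For context, a minimal sketch of the section-based text that _loads_cpp_info
# parses. The section-name conventions ("field", "dep_field", "field:config")
# are inferred from the parsing logic above; the concrete values are made up.
sample = """[includedirs]
/usr/local/include

[zlib_rootpath]
/home/user/.conan/data/zlib

[defines:release]
NDEBUG
"""
# "includedirs" applies to the result itself, "zlib_rootpath" fills the
# rootpath of the "zlib" dependency, and "defines:release" targets the
# "release" config object.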
def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 doc_stride, max_query_length, is_training):
    """Loads a data file into a list of `InputBatch`s."""
    unique_id = 1000000000
    features = []
    for (example_index, example) in enumerate(examples):
        query_tokens = tokenizer.tokenize(example.question_text)
        if len(query_tokens) > max_query_length:
            query_tokens = query_tokens[0:max_query_length]
        tok_to_orig_index = []
        orig_to_tok_index = []
        all_doc_tokens = []
        for (i, token) in enumerate(example.doc_tokens):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)
        tok_start_position = None
        tok_end_position = None
        if is_training:
            tok_start_position = orig_to_tok_index[example.start_position]
            if example.end_position < len(example.doc_tokens) - 1:
                tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
            else:
                tok_end_position = len(all_doc_tokens) - 1
            (tok_start_position, tok_end_position) = _improve_answer_span(
                all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
                example.orig_answer_text)
        # The -3 accounts for [CLS], [SEP] and [SEP]
        max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
        # We can have documents that are longer than the maximum sequence length.
        # To deal with this we do a sliding window approach, where we take chunks
        # of up to our max length with a stride of `doc_stride`.
        _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
            "DocSpan", ["start", "length"])
        doc_spans = []
        start_offset = 0
        while start_offset < len(all_doc_tokens):
            length = len(all_doc_tokens) - start_offset
            if length > max_tokens_for_doc:
                length = max_tokens_for_doc
            doc_spans.append(_DocSpan(start=start_offset, length=length))
            if start_offset + length == len(all_doc_tokens):
                break
            start_offset += min(length, doc_stride)
        for (doc_span_index, doc_span) in enumerate(doc_spans):
            tokens = []
            token_to_orig_map = {}
            token_is_max_context = {}
            segment_ids = []
            tokens.append("[CLS]")
            segment_ids.append(0)
            for token in query_tokens:
                tokens.append(token)
                segment_ids.append(0)
            tokens.append("[SEP]")
            segment_ids.append(0)
            for i in range(doc_span.length):
                split_token_index = doc_span.start + i
                token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
                is_max_context = _check_is_max_context(doc_spans, doc_span_index,
                                                       split_token_index)
                token_is_max_context[len(tokens)] = is_max_context
                tokens.append(all_doc_tokens[split_token_index])
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1] * len(input_ids)
            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            start_position = None
            end_position = None
            if is_training:
                # For training, if our document chunk does not contain an annotation
                # we throw it out, since there is nothing to predict.
                doc_start = doc_span.start
                doc_end = doc_span.start + doc_span.length - 1
                if (example.start_position < doc_start or
                        example.end_position < doc_start or
                        example.start_position > doc_end or
                        example.end_position > doc_end):
                    continue
                doc_offset = len(query_tokens) + 2
                start_position = tok_start_position - doc_start + doc_offset
                end_position = tok_end_position - doc_start + doc_offset
            if example_index < 20:
                logger.info("*** Example ***")
                logger.info("unique_id: %s" % (unique_id))
                logger.info("example_index: %s" % (example_index))
                logger.info("doc_span_index: %s" % (doc_span_index))
                logger.info("tokens: %s" % " ".join(
                    [printable_text(x) for x in tokens]))
                logger.info("token_to_orig_map: %s" % " ".join([
                    "%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
                logger.info("token_is_max_context: %s" % " ".join([
                    "%d:%s" % (x, y) for (x, y) in token_is_max_context.items()]))
                logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
                if is_training:
                    answer_text = " ".join(tokens[start_position:(end_position + 1)])
                    logger.info("start_position: %d" % (start_position))
                    logger.info("end_position: %d" % (end_position))
                    logger.info("answer: %s" % (printable_text(answer_text)))
            features.append(
                InputFeatures(
                    unique_id=unique_id,
                    example_index=example_index,
                    doc_span_index=doc_span_index,
                    tokens=tokens,
                    token_to_orig_map=token_to_orig_map,
                    token_is_max_context=token_is_max_context,
                    input_ids=input_ids,
                    input_mask=input_mask,
                    segment_ids=segment_ids,
                    start_position=start_position,
                    end_position=end_position))
            unique_id += 1
    return features
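# A self-contained illustration of the sliding-window span logic above
# (the numbers are hypothetical, not from any dataset):
import collections

_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

def make_doc_spans(num_tokens, max_tokens_for_doc, doc_stride):
    spans, start_offset = [], 0
    while start_offset < num_tokens:
        length = min(max_tokens_for_doc, num_tokens - start_offset)
        spans.append(_DocSpan(start=start_offset, length=length))
        if start_offset + length == num_tokens:
            break
        start_offset += min(length, doc_stride)
    return spans

# 10 sub-tokens, window of 5, stride of 3: the last span is shorter
# and stops exactly at the end of the document.
print(make_doc_spans(10, 5, 3))  # [DocSpan(0, 5), DocSpan(3, 5), DocSpan(6, 4)]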
def handler1(bot, update):
    text = update.message.text
    chat_id = update.message.chat_id
    print('gpt')
    mm = get_menu(text)
    if callquery[0] == '맛집 추천받기':  # "get a restaurant recommendation"
        if mm is not None:
            menus.append(text)
            bot.send_message(chat_id=chat_id,
                             text=text + ' 먹고싶구나?! 추천해줄게!' + '\n'
                                  + '맛집 리스트를 가져오고 있어~' + '\n'
                                  + '조금만 기다려줘~')
            print(menus)
            df1, gu = get_tablebylocation(addrs[0])
            df2 = get_tablebymenu(menus[0], df1)
            df2 = df2.values.tolist()
            print(df2)
            # Group df2 into chunks of five entries each.
            userlist = [2, 3, 6, 7, 8, 9, 10, 14, 15, 16, 17]
            if len(df2) == 0:
                bot.send_message(chat_id=chat_id,
                                 text='미안.. 아직 내 데이터상에는' + '\n' + ' 이 지역에 '
                                      + text + ' 맛집은 없어ㅜ.ㅜ' + '\n'
                                      + '다시 처음부터 적어줘!')
                menus.clear()
                addrs.clear()
                gu_list.clear()
                # Back to the start.
                bot.sendMessage(chat_id=chat_id,
                                text=emojize("다시 안녕! 나는 머먹찌야:hamster:" + "\n"
                                             + "맛집을 추천해줄게!" + "\n"
                                             + "어디서 먹을건지 지역을 적어줘!" + "\n"
                                             + "예)을지로", use_aliases=True))
            else:
                df3 = tuple(divide_list(df2, 5))
                print(df3)
                print(len(df3))
                if len(df3) >= 2:
                    df3 = df3[0]
                    for i in range(len(df3)):
                        txt = ''
                        for j in userlist:
                            txt = txt + str(df3[i][j]) + '\n'
                        bot.send_message(chat_id=chat_id, text=txt)
                    # More chunks remain, so offer the "show more" button.
                    more_button(update)
                else:
                    df3 = df3[0]
                    for i in range(len(df3)):
                        txt = ''
                        for j in userlist:
                            txt = txt + str(df3[i][j]) + '\n'
                        bot.send_message(chat_id=chat_id, text=txt)
                    menus.clear()
                    addrs.clear()
                    gu_list.clear()
                    bot.send_message(chat_id=chat_id, text='내가 준비한건 여기까지!')
                    bot.sendMessage(chat_id=chat_id,
                                    text=emojize("다시 안녕! 나는 머먹찌야:hamster:" + "\n"
                                                 + "맛집을 추천해줄게!" + "\n"
                                                 + "어디서 먹을건지 지역을 적어줘!" + "\n"
                                                 + "예)을지로", use_aliases=True))
        elif text in feeling:
            print(text)
            bot.send_message(chat_id=chat_id,
                             text='그렇구나 ' + '*' + text + '*' + ' 감정이구나..',
                             parse_mode="Markdown")
            ff = feel_sum(text)
            print('[newchatbot5]: {}'.format(ff))
            ff = sum(ff, [])
            ff = set(ff)
            ff = list(ff)
            print(len(ff))
            df4 = get_menuname(ff)
            df5 = random(df4)
            feeling_button(bot, update, df5)
        elif text == '머먹찌':
            gpt_button(bot, update)
            callquery.clear()
        elif text == '없어':  # "no (menu in mind)" -> recommend by weather
            nope.append(text)
            bot.send_message(chat_id=chat_id,
                             text='그렇다면 *날씨 기반*으로 메뉴를 추천해줄게!',
                             parse_mode="Markdown")
            nx, ny = get_weather(gu_list[0])
            w_con, df3 = get_menubyweather(nx, ny)
            print(w_con)
            bot.send_message(chat_id=chat_id,
                             text='오늘은 ' + '*' + w_con + '*' + '의 날씨야!',
                             parse_mode="Markdown")
            print(len(df3), df3)
            df4 = get_menuname(df3)
            print(len(df4), df4)
            df5 = random(df4)
            print(df5)
            weather_button(bot, update, df5)
        elif mm is None:
            if text == '하이':
                bot.sendMessage(chat_id=chat_id,
                                text=emojize("안녕! 나는 머먹찌야!:hamster:", use_aliases=True))
            elif text == '꺼져':
                bot.send_message(chat_id=chat_id, text='욕하지 마셈!')
            elif text == '신사동':
                bot.send_message(chat_id=chat_id, text='신사동은 여러곳에 있어! ')
                gu_request(update)
            elif text != '슬퍼':
                df1, gu = get_tablebylocation(text)
                if len(df1) > 0 and len(text) > 1:
                    gu_list.append(gu)
                    addrs.append(text)
                    bot.send_message(chat_id=chat_id,
                                     text=text + '에서 먹을거구나!' + '\n'
                                          + text + '에서 먹고 싶은 메뉴가 있어?' + '\n'
                                          + '있으면 메뉴를 적어줘!' + '\n'
                                          + '없으면 "없어"라고 적어줘!',
                                     parse_mode="Markdown")
                    print(addrs)
                    print(gu_list)
                else:
                    bot.send_message(chat_id=chat_id, text='그 말은 아직 내가 몰라..ㅠㅠ')
    else:
        get_message(bot, update)
        print('bot')
        if text == '머먹찌':
            gpt_button(bot, update)
            callquery.clear()
def display_grid():
    for i in range(len(grid)):
        print(' ', ' '.join(str(int(grid[i][j] != 0)) for j in range(len(grid))))
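# A quick illustrative run of display_grid with a hypothetical 3x3 grid;
# occupied cells print as 1, empty cells as 0:
grid = [[0, 3, 0],
        [5, 0, 0],
        [0, 0, 9]]
display_grid()
# expected output:
#   0 1 0
#   1 0 0
#   0 0 1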
net_count = 0
psi_count = num_psi_steps + 1

# Specify which classes to train on
class_use = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
class_use_str = np.array2string(class_use)
test_size = batch_size

# flag to determine whether or not you want to load the file from a checkpoint
load_checkpoint = 0
np.seterr(all='ignore')

# Define save directories. The folder name encodes every hyperparameter;
# build the common tag once and reuse it for both directories.
base_name = (opt.model + '_vAN' + str(opt.priorWeight_nsteps)
             + '_vAT' + str(opt.netWeights_psteps)
             + '_' + str(numRestart) + 'start_' + data_use
             + '_pre' + str(num_pretrain_steps)
             + '_CA' + str(closest_anchor_flag)
             + '_M' + str(M) + '_z' + str(z_dim)
             + '_A' + str(opt.num_anchor)
             + '_batch' + str(opt.batch_size)
             + '_rw' + str(opt.recon_weight)
             + '_pol1' + str(opt.post_l1_weight)
             + '_poR' + str(opt.post_TO_weight)
             + '_poC' + str(opt.post_cInfer_weight)
             + '_prl1' + str(opt.prior_l1_weight)
             + '_prR' + str(opt.prior_weight)
             + '_prC' + str(opt.prior_cInfer_weight)
             + '_g' + str(opt.gamma) + '_lr' + str(opt.lr))
if alternate_steps_flag != 0:
    base_name += '_nst' + str(num_net_steps) + 'pst' + str(num_psi_steps)
save_folder = base_name + '/'
sample_dir = base_name + '_samples/'
if not os.path.exists(save_folder):
    os.makedirs(save_folder)
if not os.path.exists(sample_dir):
    os.makedirs(sample_dir)

# Start log file
logging.basicConfig(filename=save_folder + 'output.log', level=logging.DEBUG)

# Initialize the networks and datasets
if data_use == 'concen_circle':
def service(self):
    try:
        lib_tools.create_folder(os.path.join(control.transPath(control.setting('library.movie')), ''))
        lib_tools.create_folder(os.path.join(control.transPath(control.setting('library.tv')), ''))
    except:
        pass
    try:
        control.makeFile(control.dataPath)
        dbcon = database.connect(control.libcacheFile)
        dbcur = dbcon.cursor()
        dbcur.execute("CREATE TABLE IF NOT EXISTS service (setting TEXT, value TEXT, UNIQUE(setting));")
        dbcur.execute("SELECT * FROM service WHERE setting = 'last_run'")
        fetch = dbcur.fetchone()
        if fetch is None:
            serviceProperty = "1970-01-01 23:59:00.000000"
            dbcur.execute("INSERT INTO service Values (?, ?)", ('last_run', serviceProperty))
            dbcon.commit()
        else:
            serviceProperty = str(fetch[1])
        dbcon.close()
    except:
        try:
            return dbcon.close()
        except:
            return
    try:
        control.window.setProperty(self.property, serviceProperty)
    except:
        return
    while not xbmc.abortRequested:
        try:
            serviceProperty = control.window.getProperty(self.property)
            t1 = datetime.timedelta(hours=6)
            t2 = datetime.datetime.strptime(serviceProperty, '%Y-%m-%d %H:%M:%S.%f')
            t3 = datetime.datetime.now()
            check = abs(t3 - t2) > t1
            if check == False:
                raise Exception()
            if (control.player.isPlaying() or control.condVisibility('Library.IsScanningVideo')):
                raise Exception()
            serviceProperty = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
            control.window.setProperty(self.property, serviceProperty)
            try:
                dbcon = database.connect(control.libcacheFile)
                dbcur = dbcon.cursor()
                dbcur.execute("CREATE TABLE IF NOT EXISTS service (setting TEXT, value TEXT, UNIQUE(setting));")
                dbcur.execute("DELETE FROM service WHERE setting = 'last_run'")
                dbcur.execute("INSERT INTO service Values (?, ?)", ('last_run', serviceProperty))
                dbcon.commit()
                dbcon.close()
            except:
                try:
                    dbcon.close()
                except:
                    pass
            if not control.setting('library.service.update') == 'true':
                raise Exception()
            info = control.setting('library.service.notification') or 'true'
            self.update(info=info)
        except:
            pass
        control.sleep(10000)
lines = list(f)         # reads all remaining lines at once...
lines = f.readlines()   # ...so readlines() here returns [] because list(f) already consumed the file
f.close()

# 3. Writing files.
#
# f.write(string) writes the contents of string to the file and returns the
# number of characters written:
fname = __file__ + 'd'
f = open(fname, 'w')
f.write('I am a file of ' + fname)

# To write something other than a string, it first has to be converted to a string:
value = ('the answer', 42)
s = str(value)
f.write(s)

# 4. File positions
#
# f.tell() returns an integer giving the file object's current position in the
# file, measured in bytes from the beginning of the file.
# To change the position, use f.seek(offset, from_what): the position moves
# offset bytes relative to a reference point chosen by from_what.
# A from_what of 0 measures from the beginning of the file, 1 from the current
# position, and 2 from the end of the file. from_what can be omitted and
# defaults to 0, i.e. measuring from the beginning of the file.
#
# In text files (those opened without a 'b' in the mode string), only seeks
# relative to the beginning are allowed (the exception being seeking to the
# very end with seek(0, 2)), and the only valid offset values are those
# returned by f.tell(), or zero. Any other offset value produces undefined
# behaviour.
fr = open(__file__, "r")
for line in fr:
    f.write(line)
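# A short demonstration of the tell/seek behaviour described above
# (the file name is made up; binary mode allows arbitrary seeks):
with open('workfile.bin', 'wb+') as fb:
    fb.write(b'0123456789abcdef')
    fb.seek(5)          # go to the 6th byte in the file
    print(fb.read(1))   # b'5'
    fb.seek(-3, 2)      # go to the 3rd byte before the end
    print(fb.read(1))   # b'd'
    print(fb.tell())    # 14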
def add(self, tvshowtitle, year, imdb, tvdb, range=False):
    if not control.condVisibility('Window.IsVisible(infodialog)') and not control.condVisibility('Player.HasVideo')\
            and self.silentDialog is False:
        control.infoDialog(control.lang(32552).encode('utf-8'), time=10000000)
        self.infoDialog = True
    from resources.lib.indexers import episodes
    items = episodes.episodes().get(tvshowtitle, year, imdb, tvdb, idx=False)
    try:
        items = [{'title': i['title'], 'year': i['year'], 'imdb': i['imdb'], 'tvdb': i['tvdb'],
                  'season': i['season'], 'episode': i['episode'], 'tvshowtitle': i['tvshowtitle'],
                  'premiered': i['premiered']} for i in items]
    except:
        items = []
    try:
        if not self.dupe_setting == 'true': raise Exception()
        if items == []: raise Exception()
        id = [items[0]['imdb'], items[0]['tvdb']]
        lib = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"properties" : ["imdbnumber", "title", "year"]}, "id": 1}')
        lib = unicode(lib, 'utf-8', errors='ignore')
        lib = json.loads(lib)['result']['tvshows']
        lib = [i['title'].encode('utf-8') for i in lib if str(i['imdbnumber']) in id or (i['title'].encode('utf-8') == items[0]['tvshowtitle'] and str(i['year']) == items[0]['year'])][0]
        lib = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "tvshow", "operator": "is", "value": "%s"}]}, "properties": ["season", "episode"]}, "id": 1}' % lib)
        lib = unicode(lib, 'utf-8', errors='ignore')
        lib = json.loads(lib)['result']['episodes']
        lib = ['S%02dE%02d' % (int(i['season']), int(i['episode'])) for i in lib]
        items = [i for i in items if not 'S%02dE%02d' % (int(i['season']), int(i['episode'])) in lib]
    except:
        pass
    files_added = 0
    for i in items:
        try:
            if xbmc.abortRequested == True: return sys.exit()
            if self.check_setting == 'true':
                if i['episode'] == '1':
                    self.block = True
                    src = lib_tools.check_sources(i['title'], i['year'], i['imdb'], i['tvdb'],
                                                  i['season'], i['episode'], i['tvshowtitle'], i['premiered'])
                    if src: self.block = False
                if self.block == True: raise Exception()
            premiered = i.get('premiered', '0')
            if (premiered != '0' and int(re.sub('[^0-9]', '', str(premiered))) > int(self.date)) or (premiered == '0' and not self.include_unknown):
                continue
            self.strmFile(i)
            files_added += 1
        except:
            pass
    if range == True: return
    if self.infoDialog is True:
        control.infoDialog(control.lang(32554).encode('utf-8'), time=1)
    if self.library_setting == 'true' and not control.condVisibility('Library.IsScanningVideo') and files_added > 0:
        control.execute('UpdateLibrary(video)')
def detect_image(self, image):
    image_shape = np.array(np.shape(image)[0:2])
    #---------------------------------------------------------#
    #   Pad the image with gray bars for a distortion-free resize.
    #   Alternatively the image can simply be resized directly.
    #---------------------------------------------------------#
    if self.letterbox_image:
        crop_img = np.array(letterbox_image(image, (self.input_shape[1], self.input_shape[0])))
    else:
        crop_img = image.convert('RGB')
        crop_img = crop_img.resize((self.input_shape[1], self.input_shape[0]), Image.BICUBIC)
    photo = np.array(crop_img, dtype=np.float64)
    with torch.no_grad():
        #---------------------------------------------------#
        #   Preprocess and normalize the image.
        #---------------------------------------------------#
        photo = Variable(torch.from_numpy(np.expand_dims(np.transpose(photo - MEANS, (2, 0, 1)), 0)).type(torch.FloatTensor))
        if self.cuda:
            photo = photo.cuda()
        #---------------------------------------------------#
        #   Run the image through the network for prediction.
        #---------------------------------------------------#
        preds = self.net(photo)
        top_conf = []
        top_label = []
        top_bboxes = []
        #---------------------------------------------------#
        #   preds has shape (1, num_classes, top_k, 5).
        #---------------------------------------------------#
        for i in range(preds.size(1)):
            j = 0
            while preds[0, i, j, 0] >= self.confidence:
                #---------------------------------------------------#
                #   score is the confidence of the current box,
                #   label_name the class of the predicted box.
                #---------------------------------------------------#
                score = preds[0, i, j, 0]
                label_name = self.class_names[i - 1]
                #---------------------------------------------------#
                #   pt has shape (4,): the top-left and bottom-right
                #   corners of the current box.
                #---------------------------------------------------#
                pt = (preds[0, i, j, 1:]).detach().numpy()
                coords = [pt[0], pt[1], pt[2], pt[3]]
                top_conf.append(score)
                top_label.append(label_name)
                top_bboxes.append(coords)
                j = j + 1
    # If no box passes the confidence threshold, return the original image.
    if len(top_conf) <= 0:
        return image
    top_conf = np.array(top_conf)
    top_label = np.array(top_label)
    top_bboxes = np.array(top_bboxes)
    top_xmin, top_ymin, top_xmax, top_ymax = (np.expand_dims(top_bboxes[:, 0], -1),
                                              np.expand_dims(top_bboxes[:, 1], -1),
                                              np.expand_dims(top_bboxes[:, 2], -1),
                                              np.expand_dims(top_bboxes[:, 3], -1))
    #-----------------------------------------------------------#
    #   Remove the gray-bar padding.
    #-----------------------------------------------------------#
    if self.letterbox_image:
        boxes = ssd_correct_boxes(top_ymin, top_xmin, top_ymax, top_xmax,
                                  np.array([self.input_shape[0], self.input_shape[1]]), image_shape)
    else:
        top_xmin = top_xmin * image_shape[1]
        top_ymin = top_ymin * image_shape[0]
        top_xmax = top_xmax * image_shape[1]
        top_ymax = top_ymax * image_shape[0]
        boxes = np.concatenate([top_ymin, top_xmin, top_ymax, top_xmax], axis=-1)
    font = ImageFont.truetype(font='model_data/simhei.ttf',
                              size=np.floor(3e-2 * np.shape(image)[1] + 0.5).astype('int32'))
    thickness = max((np.shape(image)[0] + np.shape(image)[1]) // self.input_shape[0], 1)
    for i, c in enumerate(top_label):
        predicted_class = c
        score = top_conf[i]
        top, left, bottom, right = boxes[i]
        top = top - 5
        left = left - 5
        bottom = bottom + 5
        right = right + 5
        top = max(0, np.floor(top + 0.5).astype('int32'))
        left = max(0, np.floor(left + 0.5).astype('int32'))
        bottom = min(np.shape(image)[0], np.floor(bottom + 0.5).astype('int32'))
        right = min(np.shape(image)[1], np.floor(right + 0.5).astype('int32'))
        # Draw the box and its label.
        label = '{} {:.2f}'.format(predicted_class, score)
        draw = ImageDraw.Draw(image)
        label_size = draw.textsize(label, font)
        label = label.encode('utf-8')
        print(label, top, left, bottom, right)
        if top - label_size[1] >= 0:
            text_origin = np.array([left, top - label_size[1]])
        else:
            text_origin = np.array([left, top + 1])
        # The inner loop variable is renamed from i to t so it does not
        # clobber the index of the enclosing enumerate loop.
        for t in range(thickness):
            draw.rectangle(
                [left + t, top + t, right - t, bottom - t],
                outline=self.colors[self.class_names.index(predicted_class)])
        draw.rectangle(
            [tuple(text_origin), tuple(text_origin + label_size)],
            fill=self.colors[self.class_names.index(predicted_class)])
        draw.text(text_origin, str(label, 'UTF-8'), fill=(0, 0, 0), font=font)
        del draw
    return image
def getTemp(self, temp):
    if config.plugins.MetrixWeather.tempUnit.value == "Fahrenheit":
        return str(int(round(float(temp), 0)))
    else:
        celsius = (float(temp) - 32) * 5 / 9
        return str(int(round(celsius, 0)))
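# The conversion above is the standard C = (F - 32) * 5 / 9; a quick
# sanity check with known fixed points:
for f, expected_c in ((32.0, 0), (212.0, 100), (98.6, 37)):
    celsius = (float(f) - 32) * 5 / 9
    assert int(round(celsius, 0)) == expected_c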
def update(self, query=None, info='true'):
    if not query == None: control.idle()
    try:
        items = []
        season, episode = [], []
        show = [os.path.join(self.library_folder, i) for i in control.listDir(self.library_folder)[0]]
        for s in show:
            try: season += [os.path.join(s, i) for i in control.listDir(s)[0]]
            except: pass
        for s in season:
            try: episode.append([os.path.join(s, i) for i in control.listDir(s)[1] if i.endswith('.strm')][-1])
            except: pass
        for file in episode:
            try:
                file = control.openFile(file)
                read = file.read()
                read = read.encode('utf-8')
                file.close()
                if not read.startswith(sys.argv[0]): raise Exception()
                params = dict(urlparse.parse_qsl(read.replace('?', '')))
                try: tvshowtitle = params['tvshowtitle']
                except: tvshowtitle = None
                try: tvshowtitle = params['show']
                except: pass
                if tvshowtitle == None or tvshowtitle == '': raise Exception()
                year, imdb, tvdb = params['year'], params['imdb'], params['tvdb']
                imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
                try: tmdb = params['tmdb']
                except: tmdb = '0'
                items.append({'tvshowtitle': tvshowtitle, 'year': year, 'imdb': imdb, 'tmdb': tmdb, 'tvdb': tvdb})
            except:
                pass
        items = [i for x, i in enumerate(items) if i not in items[x + 1:]]
        if len(items) == 0: raise Exception()
    except:
        return
    try:
        lib = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"properties" : ["imdbnumber", "title", "year"]}, "id": 1}')
        lib = unicode(lib, 'utf-8', errors='ignore')
        lib = json.loads(lib)['result']['tvshows']
    except:
        return
    if info == 'true' and not control.condVisibility('Window.IsVisible(infodialog)') and not control.condVisibility('Player.HasVideo'):
        control.infoDialog(control.lang(32553).encode('utf-8'), time=10000000)
        self.infoDialog = True
    try:
        control.makeFile(control.dataPath)
        dbcon = database.connect(control.libcacheFile)
        dbcur = dbcon.cursor()
        dbcur.execute("CREATE TABLE IF NOT EXISTS tvshows (id TEXT, items TEXT, UNIQUE(id));")
    except:
        return
    try: from resources.lib.indexers import episodes
    except: return
    files_added = 0
    # __init__ doesn't get called from services so self.date never gets updated
    # and new episodes are not added to the library
    self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours=5))
    if control.setting('library.importdelay') != 'true':
        self.date = self.datetime.strftime('%Y%m%d')
    else:
        self.date = (self.datetime - datetime.timedelta(hours=24)).strftime('%Y%m%d')
    for item in items:
        it = None
        if xbmc.abortRequested == True: return sys.exit()
        try:
            dbcur.execute("SELECT * FROM tvshows WHERE id = '%s'" % item['tvdb'])
            fetch = dbcur.fetchone()
            it = eval(fetch[1].encode('utf-8'))
        except:
            pass
        try:
            if not it == None: raise Exception()
            it = episodes.episodes().get(item['tvshowtitle'], item['year'], item['imdb'], item['tvdb'], idx=False)
            status = it[0]['status'].lower()
            it = [{'title': i['title'], 'year': i['year'], 'imdb': i['imdb'], 'tvdb': i['tvdb'],
                   'season': i['season'], 'episode': i['episode'], 'tvshowtitle': i['tvshowtitle'],
                   'premiered': i['premiered']} for i in it]
            if status == 'continuing': raise Exception()
            dbcur.execute("INSERT INTO tvshows Values (?, ?)", (item['tvdb'], repr(it)))
            dbcon.commit()
        except:
            pass
        try:
            id = [item['imdb'], item['tvdb']]
            if not item['tmdb'] == '0': id += [item['tmdb']]
            ep = [x['title'].encode('utf-8') for x in lib if str(x['imdbnumber']) in id or (x['title'].encode('utf-8') == item['tvshowtitle'] and str(x['year']) == item['year'])][0]
            ep = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "tvshow", "operator": "is", "value": "%s"}]}, "properties": ["season", "episode"]}, "id": 1}' % ep)
            ep = unicode(ep, 'utf-8', errors='ignore')
            ep = json.loads(ep).get('result', {}).get('episodes', {})
            ep = [{'season': int(i['season']), 'episode': int(i['episode'])} for i in ep]
            ep = sorted(ep, key=lambda x: (x['season'], x['episode']))[-1]
            num = [x for x, y in enumerate(it) if str(y['season']) == str(ep['season']) and str(y['episode']) == str(ep['episode'])][-1]
            it = [y for x, y in enumerate(it) if x > num]
            if len(it) == 0: continue
        except:
            continue
        for i in it:
            try:
                if xbmc.abortRequested == True: return sys.exit()
                premiered = i.get('premiered', '0')
                if (premiered != '0' and int(re.sub('[^0-9]', '', str(premiered))) > int(self.date)) or (premiered == '0' and not self.include_unknown):
                    continue
                libtvshows().strmFile(i)
                files_added += 1
            except:
                pass
    if self.infoDialog == True:
        control.infoDialog(control.lang(32554).encode('utf-8'), time=1)
    if self.library_setting == 'true' and not control.condVisibility('Library.IsScanningVideo') and files_added > 0:
        control.execute('UpdateLibrary(video)')
def _thread_run(self, **kwargs):
    input_fn = kwargs.get('input_fn')
    queue = self.queue
    dispatch_queue = self.dispatch_queue
    device = kwargs.get('device')
    drs = kwargs.get('default_ranges').get
    touches = {}
    touches_sent = []
    point = {}
    l_points = []

    # prepare some vars to get limit of some component
    range_min_position_x = 0
    range_max_position_x = 2048
    range_min_position_y = 0
    range_max_position_y = 2048
    range_min_pressure = 0
    range_max_pressure = 255
    range_min_abs_x = 0
    range_max_abs_x = 255
    range_min_abs_y = 0
    range_max_abs_y = 255
    range_min_abs_pressure = 0
    range_max_abs_pressure = 255
    invert_x = int(bool(drs('invert_x', 0)))
    invert_y = int(bool(drs('invert_y', 1)))
    rotation = drs('rotation', 0)

    def assign_coord(point, value, invert, coords):
        cx, cy = coords
        if invert:
            value = 1. - value
        if rotation == 0:
            point[cx] = value
        elif rotation == 90:
            point[cy] = value
        elif rotation == 180:
            point[cx] = 1. - value
        elif rotation == 270:
            point[cy] = 1. - value

    def assign_rel_coord(point, value, invert, coords):
        cx, cy = coords
        if invert:
            value = -1 * value
        if rotation == 0:
            point[cx] += value
        elif rotation == 90:
            point[cy] += value
        elif rotation == 180:
            point[cx] += -value
        elif rotation == 270:
            point[cy] += -value

    def process_as_multitouch(tv_sec, tv_usec, ev_type, ev_code, ev_value):
        # sync event
        if ev_type == EV_SYN:
            if ev_code == SYN_MT_REPORT:
                if 'id' not in point:
                    return
                l_points.append(point.copy())
            elif ev_code == SYN_REPORT:
                process(l_points)
                del l_points[:]
        elif ev_type == EV_MSC and ev_code in (MSC_RAW, MSC_SCAN):
            pass
        else:
            # compute multitouch track
            if ev_code == ABS_MT_TRACKING_ID:
                point.clear()
                point['id'] = ev_value
            elif ev_code == ABS_MT_POSITION_X:
                val = normalize(ev_value, range_min_position_x, range_max_position_x)
                assign_coord(point, val, invert_x, 'xy')
            elif ev_code == ABS_MT_POSITION_Y:
                val = 1. - normalize(ev_value, range_min_position_y, range_max_position_y)
                assign_coord(point, val, invert_y, 'yx')
            elif ev_code == ABS_MT_ORIENTATION:
                point['orientation'] = ev_value
            elif ev_code == ABS_MT_BLOB_ID:
                point['blobid'] = ev_value
            elif ev_code == ABS_MT_PRESSURE:
                point['pressure'] = normalize(ev_value, range_min_pressure, range_max_pressure)
            elif ev_code == ABS_MT_TOUCH_MAJOR:
                point['size_w'] = ev_value
            elif ev_code == ABS_MT_TOUCH_MINOR:
                point['size_h'] = ev_value

    def process_as_mouse_or_keyboard(tv_sec, tv_usec, ev_type, ev_code, ev_value):
        if ev_type == EV_SYN:
            if ev_code == SYN_REPORT:
                process([point])
        elif ev_type == EV_REL:
            if ev_code == 0:
                assign_rel_coord(point, min(1., max(-1., ev_value / 1000.)), invert_x, 'xy')
            elif ev_code == 1:
                assign_rel_coord(point, min(1., max(-1., ev_value / 1000.)), invert_y, 'yx')
        elif ev_type != EV_KEY:
            if ev_code == ABS_X:
                val = normalize(ev_value, range_min_abs_x, range_max_abs_x)
                assign_coord(point, val, invert_x, 'xy')
            elif ev_code == ABS_Y:
                val = 1. - normalize(ev_value, range_min_abs_y, range_max_abs_y)
                assign_coord(point, val, invert_y, 'yx')
            elif ev_code == ABS_PRESSURE:
                point['pressure'] = normalize(ev_value, range_min_abs_pressure, range_max_abs_pressure)
        else:
            buttons = {
                272: 'left', 273: 'right', 274: 'middle', 275: 'side',
                276: 'extra', 277: 'forward', 278: 'back', 279: 'task',
                330: 'touch', 320: 'pen'}
            if ev_code in buttons.keys():
                if ev_value:
                    if 'button' not in point:
                        point['button'] = buttons[ev_code]
                        point['id'] += 1
                        if '_avoid' in point:
                            del point['_avoid']
                elif 'button' in point:
                    if point['button'] == buttons[ev_code]:
                        del point['button']
                        point['id'] += 1
                        point['_avoid'] = True
            else:
                if ev_value == 1:
                    z = keyboard_keys[ev_code][-1 if 'shift' in Window._modifiers else 0]
                    if z == 'shift' or z == 'alt':
                        Window._modifiers.append(z)
                    dispatch_queue.append(('key_down', (
                        Keyboard.keycodes[z.lower()], ev_code,
                        keys_str.get(z, z), Window._modifiers)))
                elif ev_value == 0:
                    z = keyboard_keys[ev_code][-1 if 'shift' in Window._modifiers else 0]
                    dispatch_queue.append(('key_up', (
                        Keyboard.keycodes[z.lower()], ev_code,
                        keys_str.get(z, z), Window._modifiers)))
                    if z == 'shift':
                        Window._modifiers.remove('shift')

    def process(points):
        if not is_multitouch:
            dispatch_queue.append(('mouse_pos', (
                points[0]['x'] * Window.width,
                points[0]['y'] * Window.height)))
        actives = [args['id'] for args in points
                   if 'id' in args and '_avoid' not in args]
        for args in points:
            tid = args['id']
            try:
                touch = touches[tid]
                if touch.sx == args['x'] and touch.sy == args['y']:
                    continue
                touch.move(args)
                if tid not in touches_sent:
                    queue.append(('begin', touch))
                    touches_sent.append(tid)
                queue.append(('update', touch))
            except KeyError:
                if '_avoid' not in args:
                    touch = HIDMotionEvent(device, tid, args)
                    touches[touch.id] = touch
                    if tid not in touches_sent:
                        queue.append(('begin', touch))
                        touches_sent.append(tid)
        for tid in list(touches.keys())[:]:
            if tid not in actives:
                touch = touches[tid]
                if tid in touches_sent:
                    touch.update_time_end()
                    queue.append(('end', touch))
                    touches_sent.remove(tid)
                del touches[tid]

    def normalize(value, vmin, vmax):
        return (value - vmin) / float(vmax - vmin)

    # open the input
    fd = open(input_fn, 'rb')

    # get the controler name (EVIOCGNAME)
    device_name = str(fcntl.ioctl(fd, EVIOCGNAME + (256 << 16), " " * 256)).split('\x00')[0]
    Logger.info('HIDMotionEvent: using <%s>' % device_name)

    # get abs infos
    bit = fcntl.ioctl(fd, EVIOCGBIT + (EV_MAX << 16), ' ' * sz_l)
    bit, = struct.unpack('Q', bit)
    is_multitouch = False
    for x in range(EV_MAX):
        # preserve this, we may want other things than EV_ABS
        if x != EV_ABS:
            continue
        # EV_ABS available for this device ?
        if (bit & (1 << x)) == 0:
            continue
        # ask abs info keys to the devices
        sbit = fcntl.ioctl(fd, EVIOCGBIT + x + (KEY_MAX << 16), ' ' * sz_l)
        sbit, = struct.unpack('Q', sbit)
        for y in range(KEY_MAX):
            if (sbit & (1 << y)) == 0:
                continue
            absinfo = fcntl.ioctl(fd, EVIOCGABS + y + (struct_input_absinfo_sz << 16),
                                  ' ' * struct_input_absinfo_sz)
            abs_value, abs_min, abs_max, abs_fuzz, \
                abs_flat, abs_res = struct.unpack('iiiiii', absinfo)
            if y == ABS_MT_POSITION_X:
                is_multitouch = True
                range_min_position_x = drs('min_position_x', abs_min)
                range_max_position_x = drs('max_position_x', abs_max)
                Logger.info('HIDMotionEvent: <%s> range position X is %d - %d' % (
                    device_name, abs_min, abs_max))
            elif y == ABS_MT_POSITION_Y:
                is_multitouch = True
                range_min_position_y = drs('min_position_y', abs_min)
                range_max_position_y = drs('max_position_y', abs_max)
                Logger.info('HIDMotionEvent: <%s> range position Y is %d - %d' % (
                    device_name, abs_min, abs_max))
            elif y == ABS_MT_PRESSURE:
                range_min_pressure = drs('min_pressure', abs_min)
                range_max_pressure = drs('max_pressure', abs_max)
                Logger.info('HIDMotionEvent: <%s> range pressure is %d - %d' % (
                    device_name, abs_min, abs_max))
            elif y == ABS_X:
                range_min_abs_x = drs('min_abs_x', abs_min)
                range_max_abs_x = drs('max_abs_x', abs_max)
                Logger.info('HIDMotionEvent: <%s> range ABS X position is %d - %d' % (
                    device_name, abs_min, abs_max))
            elif y == ABS_Y:
                range_min_abs_y = drs('min_abs_y', abs_min)
                range_max_abs_y = drs('max_abs_y', abs_max)
                Logger.info('HIDMotionEvent: <%s> range ABS Y position is %d - %d' % (
                    device_name, abs_min, abs_max))
            elif y == ABS_PRESSURE:
                range_min_abs_pressure = drs('min_abs_pressure', abs_min)
                range_max_abs_pressure = drs('max_abs_pressure', abs_max)
                Logger.info('HIDMotionEvent: <%s> range ABS pressure is %d - %d' % (
                    device_name, abs_min, abs_max))

    # init the point
    if not is_multitouch:
        point = {'x': .5, 'y': .5, 'id': 0, '_avoid': True}

    # read until the end
    while fd:
        data = fd.read(struct_input_event_sz)
        if len(data) < struct_input_event_sz:
            break
        # extract each event
        for i in range(int(len(data) / struct_input_event_sz)):
            ev = data[i * struct_input_event_sz:]
            # extract timeval + event infos
            infos = struct.unpack('LLHHi', ev[:struct_input_event_sz])
            if is_multitouch:
                process_as_multitouch(*infos)
            else:
                process_as_mouse_or_keyboard(*infos)
def format_table(table, column_names=None, column_specs=None, max_col_width=32,
                 report_dimensions=False):
    '''
    Table pretty printer. Expects tables to be given as arrays of arrays.

    Example:

        print format_table([[1, "2"], [3, "456"]], column_names=['A', 'B'])
    '''
    if len(table) > 0:
        col_widths = [0] * len(table[0])
    elif column_specs is not None:
        col_widths = [0] * (len(column_specs) + 1)
    elif column_names is not None:
        col_widths = [0] * len(column_names)
    my_column_names = []
    if column_specs is not None:
        column_names = ['Row']
        column_names.extend([col['name'] for col in column_specs])
        column_specs = [{'name': 'Row', 'type': 'float'}] + column_specs
    if column_names is not None:
        for i in range(len(column_names)):
            my_col = str(column_names[i])
            if len(my_col) > max_col_width:
                my_col = my_col[:max_col_width - 1] + u'…'
            my_column_names.append(my_col)
            col_widths[i] = max(col_widths[i], len(my_col))
    my_table = []
    for row in table:
        my_row = []
        for i in range(len(row)):
            my_item = escape_unicode_string(str(row[i]))
            if len(my_item) > max_col_width:
                my_item = my_item[:max_col_width - 1] + u'…'
            my_row.append(my_item)
            col_widths[i] = max(col_widths[i], len(my_item))
        my_table.append(my_row)

    def border(i):
        return WHITE() + i + ENDC()

    type_colormap = {'boolean': BLUE(), 'integer': YELLOW(), 'float': WHITE(),
                     'string': GREEN()}
    for i in 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64':
        type_colormap[i] = type_colormap['integer']
    type_colormap['double'] = type_colormap['float']

    def col_head(i):
        if column_specs is not None:
            return BOLD() + type_colormap[column_specs[i]['type']] + column_names[i] + ENDC()
        else:
            return BOLD() + WHITE() + column_names[i] + ENDC()

    formatted_table = [border(u'┌') + border(u'┬').join(border(u'─') * i for i in col_widths) + border(u'┐')]
    if len(my_column_names) > 0:
        padded_column_names = [col_head(i) + ' ' * (col_widths[i] - len(my_column_names[i]))
                               for i in range(len(my_column_names))]
        formatted_table.append(border(u'│') + border(u'│').join(padded_column_names) + border(u'│'))
        formatted_table.append(border(u'├') + border(u'┼').join(border(u'─') * i for i in col_widths) + border(u'┤'))
    for row in my_table:
        padded_row = [row[i] + ' ' * (col_widths[i] - len(row[i])) for i in range(len(row))]
        formatted_table.append(border(u'│') + border(u'│').join(padded_row) + border(u'│'))
    formatted_table.append(border(u'└') + border(u'┴').join(border(u'─') * i for i in col_widths) + border(u'┘'))
    if report_dimensions:
        return '\n'.join(formatted_table), len(formatted_table), sum(col_widths) + len(col_widths) + 1
    else:
        return '\n'.join(formatted_table)
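# Assuming the color helpers (WHITE, BOLD, ENDC, ...) simply emit ANSI escape
# codes, a run with them stubbed out renders plain box-drawing output like
# this (the stubs exist only for this sketch; the real module provides them):
WHITE = BLUE = YELLOW = GREEN = BOLD = ENDC = lambda: ''
escape_unicode_string = str

print(format_table([[1, "2"], [3, "456"]], column_names=['A', 'B']))
# ┌─┬───┐
# │A│B  │
# ├─┼───┤
# │1│2  │
# │3│456│
# └─┴───┘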
def increase_precision(code, var_type, new_precision, **kwargs):
    """
    Function to increase the precision of a variable type in the fortran code.

    Args:
        code (list): List of strings containing the code.
        var_type (string): Type of variable to change the precision.
        new_precision (int): New variable precision.

    Kwargs:
        terminal: Terminal in which to write what the program is doing
            and the problems it is encountering.

    Returns:
        An altered list of strings with the new precision.
    """
    terminal_present = False
    if 'terminal' in kwargs:
        terminal_present = True
        terminal = kwargs['terminal']
        terminal.add_line('')
        terminal.add_line('Increasing precision of {} variables to {}...'.format(var_type, new_precision))
    else:
        print('Increasing precision of {} variables to {}...'.format(var_type, new_precision))
    try:
        for i, line in enumerate(code):
            position = fparsertools.find_command(line, var_type)
            if position is not None:
                position += len(var_type)
                if line[position] == '(' and line[position + 1].lower() != 'k':
                    # Declarations of the form "real(8)".
                    position += 1
                    closing_parenthesis = fparsertools.find_closing_parenthesis(line, position)
                    old_precision = int(line[position:closing_parenthesis])
                    final_position = closing_parenthesis
                elif line[position] == '(' and line[position + 1].lower() == 'k':
                    # Declarations of the form "real(kind=8)".
                    position += len('(kind=')
                    closing_parenthesis = fparsertools.find_closing_parenthesis(line, position)
                    old_precision = int(line[position:closing_parenthesis])
                    final_position = closing_parenthesis
                elif line[position] == '*':
                    # Declarations of the form "real*8".
                    position += 1
                    next_item = (line.find(' ', position), line.find(',', position),
                                 line.find(':', position))
                    # Renamed from "min" so the builtin min() is not shadowed;
                    # find the nearest delimiter after the precision digits.
                    min_pos = len(line)
                    for j in next_item:
                        if j < min_pos and j != -1:
                            min_pos = j
                    old_precision = int(line[position:min_pos])
                    final_position = min_pos - 1
                else:
                    # No explicit precision: insert an empty "()" to hold the
                    # new one. This insert only makes sense in this branch, so
                    # it lives here rather than applying to every declaration.
                    old_precision = 0
                    position += 1
                    final_position = position
                    if old_precision < new_precision:
                        code[i] = line[:position - 1] + '()' + line[position - 1:]
                if old_precision < new_precision:
                    code[i] = (code[i][:position] + '{}'.format(new_precision)
                               + code[i][final_position + len(str(old_precision)) - 1:])
        if terminal_present:
            terminal.add_line('Success: Increased Precision')
        else:
            print('Success: Increased Precision')
        return code
    except Exception as e:
        if terminal_present:
            terminal.add_line('Error: ' + str(e), number=2)
        else:
            print(e)
        return None
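# A sketch of the intended effect, assuming fparsertools resolves positions
# the way the branches above expect (the sample lines are hypothetical Fortran):
#
#   code = ['real(8) :: x', 'real(kind=8) :: y', 'real :: z']
#   increase_precision(code, 'real', 16)
#   -> ['real(16) :: x', 'real(kind=16) :: y', 'real(16) :: z']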
def getWeatherThread(self):
    global g_updateRunning
    print "MetrixHDWeatherStandalone lookup for ID " + str(self.woeid)
    url = "http://query.yahooapis.com/v1/public/yql?q=select%20item%20from%20weather.forecast%20where%20woeid%3D%22" + str(self.woeid) + "%22&format=xml"
    try:
        file = urllib2.urlopen(url, timeout=2)
        data = file.read()
        file.close()
    except Exception as error:
        print "Cant get weather data: %r" % error
        # cancel weather function
        config.plugins.MetrixWeather.currentWeatherDataValid.value = False
        g_updateRunning = False
        return
    dom = parseString(data)
    try:
        title = self.getText(dom.getElementsByTagName('title')[0].childNodes)
    except IndexError as error:
        print "Cant get weather data: %r" % error
        g_updateRunning = False
        self.startTimer(True, 30)
        if self.check:
            text = "%s|" % str(error)
            self.writeCheckFile(text)
        return
    config.plugins.MetrixWeather.currentLocation.value = str(title).split(',')[0].replace("Conditions for ", "")
    currentWeather = dom.getElementsByTagName('yweather:condition')[0]
    # check returned date from weather values
    t = time()
    lastday = strftime("%d %b %Y", localtime(t - 3600 * 24)).strip("0")
    currday = strftime("%d %b %Y", localtime(t)).strip("0")
    currentWeatherDate = currentWeather.getAttributeNode('date').nodeValue
    config.plugins.MetrixWeather.currentWeatherDataValid.value = True
    currentWeatherCode = currentWeather.getAttributeNode('code')
    config.plugins.MetrixWeather.currentWeatherCode.value = self.ConvertCondition(currentWeatherCode.nodeValue)
    currentWeatherTemp = currentWeather.getAttributeNode('temp')
    config.plugins.MetrixWeather.currentWeatherTemp.value = self.getTemp(currentWeatherTemp.nodeValue)
    currentWeatherText = currentWeather.getAttributeNode('text')
    config.plugins.MetrixWeather.currentWeatherText.value = currentWeatherText.nodeValue
    n = 0
    currentWeather = dom.getElementsByTagName('yweather:forecast')[n]
    # If the first forecast entry is for yesterday, skip it.
    if lastday in currentWeather.getAttributeNode('date').nodeValue and currday in currentWeatherDate:
        n = 1
        currentWeather = dom.getElementsByTagName('yweather:forecast')[n]
    currentWeatherCode = currentWeather.getAttributeNode('code')
    config.plugins.MetrixWeather.forecastTodayCode.value = self.ConvertCondition(currentWeatherCode.nodeValue)
    currentWeatherTemp = currentWeather.getAttributeNode('high')
    config.plugins.MetrixWeather.forecastTodayTempMax.value = self.getTemp(currentWeatherTemp.nodeValue)
    currentWeatherTemp = currentWeather.getAttributeNode('low')
    config.plugins.MetrixWeather.forecastTodayTempMin.value = self.getTemp(currentWeatherTemp.nodeValue)
    currentWeatherText = currentWeather.getAttributeNode('text')
    config.plugins.MetrixWeather.forecastTodayText.value = currentWeatherText.nodeValue
    currentWeather = dom.getElementsByTagName('yweather:forecast')[n + 1]
    currentWeatherCode = currentWeather.getAttributeNode('code')
    config.plugins.MetrixWeather.forecastTomorrowCode.value = self.ConvertCondition(currentWeatherCode.nodeValue)
    currentWeatherTemp = currentWeather.getAttributeNode('high')
    config.plugins.MetrixWeather.forecastTomorrowTempMax.value = self.getTemp(currentWeatherTemp.nodeValue)
    currentWeatherTemp = currentWeather.getAttributeNode('low')
    config.plugins.MetrixWeather.forecastTomorrowTempMin.value = self.getTemp(currentWeatherTemp.nodeValue)
    currentWeatherText = currentWeather.getAttributeNode('text')
    config.plugins.MetrixWeather.forecastTomorrowText.value = currentWeatherText.nodeValue
    config.plugins.MetrixWeather.save()
    g_updateRunning = False
    self.refreshcnt = 0
#!/usr/bin/python
# -*- coding: utf-8 -*-

import codecs
import nltk

fp1 = codecs.open("/home/tj/big_data/data/jinyong/utf8/x1.txt", "r", "utf-8")
text = fp1.read()
text1 = text.split(' ')

fdist1 = nltk.FreqDist(text1)
fdist2 = sorted(fdist1.iteritems(), key=lambda d: d[1], reverse=True)

# Print the 200 most frequent tokens as "token:count".
for i in xrange(0, 200):
    print (fdist2[i][0] + ":" + str(fdist2[i][1]))

fp1.close()
# Maps each display name in the combo box to its translation target code.
LANG_CODES = {
    "English, (en_US)": "en",
    "German, Germany (de_DE)": "de",
    "Chinese, PRC (zh_CN)": "zh",
    "Czech, Czech Republic (cs_CZ)": "cs",
    "Dutch, Belgium (nl_BE)": "nl",
    "French, France (fr_FR)": "fr",
    "Italian, Italy (it_IT)": "it",
    "Japanese (ja_JP)": "ja",
    "Korean (ko_KR)": "ko",
    "Polish (pl_PL)": "pl",
    "Russian (ru_RU)": "ru",
    "Spanish (es_ES)": "es",
    "Bulgarian, Bulgaria (bg_BG)": "bg",
    "Catalan, Spain (ca_ES)": "ca",
    "Danish, Denmark(da_DK)": "da",
    "Greek, Greece (el_GR)": "el",
    "Hindi, India (hi_IN)": "hi",
    "Hungarian, Hungary (hu_HU)": "hu",
    "Portuguese, Brazil (pt_BR)": "pt",
    "Romanian, Romania (ro_RO)": "ro",
    "Serbian (sr_RS)": "sr",
    "Slovak, Slovakia (sk_SK)": "sk",
    "Slovenian, Slovenia (sl_SI)": "sl",
    "Spanish, US (es_US)": "es",
    "Swedish, Sweden (sv_SE)": "sv",
    "Thai, Thailand (th_TH)": "th",
    "Turkish, Turkey (tr_TR)": "tr",  # was "hu" in the original, an apparent copy-paste typo
    "Ukrainian, Ukraine (uk_UA)": "uk",
    "Vietnamese, Vietnam (vi_VN)": "vi",
}

def start_translation(self):
    global filename
    selected = str(self.langComboBox.currentText())
    print(selected)
    # Unknown labels fall back to "", matching the original chain's final else.
    translate_to = LANG_CODES.get(selected, "")
    print("Translating to : " + translate_to)
    if filename != "/":  # the original "is not" compared identity, not equality
        global totalStrings
        print(filename)
        xmlDoc = minidom.parse(filename)
        stringList = xmlDoc.getElementsByTagName('string')
        totalStrings = int(len(stringList))
        print(totalStrings)
        self.progressBar.setProperty("maximum", totalStrings)
        for s in stringList:
            try:
                try:
                    # Strings marked translatable="false" are copied verbatim.
                    if s.attributes['translatable'].value == "false":
                        self.showTransStrings.insertPlainText(
                            "<string name=" + "\"" + s.attributes['name'].value + "\""
                            + " translatable=\"false\"" + ">"
                            + s.firstChild.nodeValue + "</string>" + "\n")
                except:
                    # No "translatable" attribute: translate the value.
                    self.showTransStrings.insertPlainText(
                        "<string name=" + "\"" + s.attributes['name'].value + "\"" + ">"
                        + self.do_translation(s.firstChild.nodeValue, translate_to)
                        + "</string>" + "\n")
            except:
                self.showTransStrings.insertPlainText(
                    "****Too many Tags in the String**** Name : "
                    + s.attributes['name'].value)
    else:
        print("Please select a File")
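# Design note: the table-driven LANG_CODES mapping replaces a ~30-branch elif
# chain; adding a language is now a one-line change, and an unknown label
# falls back to "" exactly as the original chain's final else did.
assert LANG_CODES.get("Klingon (tlh)", "") == ""  # hypothetical label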