def drawPortNames(self, painter):
    """ Paints port names next to PortWidget.

    See setShowPortNames().
    """
    if not self._showPortNames:
        return

    # factor should be 0.5, but text height is calculated to big
    titleHeightFactor = 0.4

    if self._portNamesPosition == self.PORT_NAMES_NEXT_TO_PORTS:
        for port in self.sinkPorts():
            if port.titleField():
                port.titleField().paint(painter,
                                        port.x() + self.getDistance('rightMargin') + port.width(),
                                        port.y() - titleHeightFactor * port.getDistance('titleFieldHeight'),
                                        self.scale())
        for port in self.sourcePorts():
            if port.titleField():
                port.titleField().paint(painter,
                                        port.x() - port.getDistance('titleFieldWidth') - self.getDistance('rightMargin'),
                                        port.y() - titleHeightFactor * port.getDistance('titleFieldHeight'),
                                        self.scale())
    elif self._portNamesPosition == self.PORT_NAMES_ABOVE_PORTS:
        painter.pen().setWidth(2)
        for port in self.sinkPorts():
            if port.titleField():
                port.titleField().paint(painter,
                                        self.getDistance('firstSinkX'),
                                        port.y() - titleHeightFactor * port.getDistance('titleFieldHeight') - port.height(),
                                        self.scale())
        for port in self.sourcePorts():
            if port.titleField():
                port.titleField().paint(painter,
                                        self.width() - port.getDistance('titleFieldWidth') - port.width() * 0.5,
                                        port.y() - titleHeightFactor * port.getDistance('titleFieldHeight') - port.height(),
                                        self.scale())
    else:
        # was logging.waring (typo) -- logging has no 'waring', this raised AttributeError
        logging.warning(self.__class__.__name__ + ": drawPortNames() - " + self.NO_VALID_PORT_NAMES_POSITION_MESSAGE)
def get_repay_records(self, url, opener=None):
    """Fetch and summarize repayment records from *url*.

    Returns a tuple ``(summary_dict, phases_list)``.  On any failure the
    summary dict contains a failure flag and the phases list is empty.
    """
    repay_records = self.open_url(url, opener)
    if repay_records is None:
        print("url:{} open failed".format(url))
        logging.warning('repay_records,' + url)
        return {'repay_records_fail': True}, []
    try:
        # SECURITY: eval() on a fetched payload executes arbitrary code;
        # if the payload is JSON, json.loads() should be used instead -- TODO confirm format.
        repay_records_dict = eval(repay_records)['data']
    except:
        logging.warning('repay_records,' + url)
        return {'repay_records_fail': True}, []
    if not isinstance(repay_records_dict, dict):
        # was logging.waring (typo)
        logging.warning('repay_records,' + url)
        return {'repay_records_fail': True}, []
    if 'phases' not in repay_records_dict:
        logging.warning('no_repay_records,' + url)
        return {'repay records not exits': True}, []
    # Count occurrences of each repayment status.
    repay_status_list = [x['repayType'] for x in repay_records_dict['phases']]
    repay_status_dict = {}
    for i in set(repay_status_list):
        repay_status_dict[i] = repay_status_list.count(i)
    repay_status_dict['total_period'] = len(repay_status_list)
    repay_status_dict['repay_str'] = ';'.join(
        [':'.join([x['repayType'], x['repayTime']]) for x in repay_records_dict['phases']])
    repay_status_dict['repaid'] = repay_records_dict['repaid']
    repay_status_dict['unRepaid'] = repay_records_dict['unRepaid']
    # NOTE(review): this sums 'unRepaidAmount' into a key named 'total_repaid';
    # looks inconsistent -- confirm the intended field.
    repay_status_dict['total_repaid'] = sum(
        float(x['unRepaidAmount']) for x in repay_records_dict['phases'])
    return repay_status_dict, repay_records_dict['phases']
def _RecordHTTPResponse(self, params): required_fields = ['requestId', 'timestamp', 'response'] for field in required_fields: if field not in params: logging.waring('HTTP Response missing required field: %s', field) return self._http_responses.append(InspectorNetworkResponseData(self, params))
def load(self, epoch=None):
    """Restore model weights from a checkpoint.

    epoch: if None, load the latest checkpoint; otherwise load the stored
    checkpoint whose global step is closest to the step implied by *epoch*.
    """
    if epoch is None:
        # Load latest checkpoint file
        ckpt_name = tf.train.latest_checkpoint(self.save_path)
        if ckpt_name is not None:
            self.saver_forrestore.restore(self.session, ckpt_name)
            logging.warning(
                "Loading model %s - this will screw up the epoch number during training" % ckpt_name)
        else:
            logging.warning("Training model from scratch")
    else:
        # Translate the epoch into an approximate global step, then pick
        # the checkpoint whose step is closest.
        approx_gstep = int(epoch * self.batcher.get_training_size() / self.config['batch_size'])
        ckpt = tf.train.get_checkpoint_state(self.save_path)
        chosen_checkpoint_index = self._find_closest_path(ckpt, approx_gstep)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(
                ckpt.all_model_checkpoint_paths[chosen_checkpoint_index])
            self.saver_forrestore.restore(
                self.session, os.path.join(self.save_path, ckpt_name))
            logging.warning(
                "Loading model %s - this will screw up the epoch number during training" % ckpt_name)
        else:
            # was logging.waring (typo)
            logging.warning("Failed loading model")
def build_tiff(adate):
    """Download VIIRS flood composite parts for *adate* and build GeoTIFFs.

    Returns the list of generated (or already existing) tiff file names.
    """
    joblist = [{
        'product': '1day',
        'url': 'https://floodlight.ssec.wisc.edu/composite/RIVER-FLDglobal-composite1_{}_000000.part{}.tif'
    }, {
        'product': '5day',
        'url': 'https://floodlight.ssec.wisc.edu/composite/RIVER-FLDglobal-composite_{}_000000.part{}.tif'
    }]
    final_tiff = []
    for entry in joblist:
        tiff_file = "VIIRS_{}_composite{}_flood.tiff".format(entry['product'], adate)
        if os.path.exists(tiff_file):
            final_tiff.append(tiff_file)
            continue
        tiff_l = []
        for i in range(1, 137):
            dataurl = entry['url'].format(adate, str(i).zfill(3))
            filename = dataurl.split('/')[-1]
            # try download file
            try:
                r = requests.get(dataurl, allow_redirects=True)
            except requests.RequestException as e:
                logging.warning("no download: " + dataurl)
                # was logging.waring (typo)
                logging.warning('error:' + str(e))
                continue
            # may not have files for some parts
            if r.status_code == 404:
                continue
            # use a context manager so the handle is closed promptly
            with open(filename, 'wb') as fh:
                fh.write(r.content)
            tiff_l.append(filename)
        vrt_file = tiff_file.replace('tiff', 'vrt')
        # build vrt
        vrt = gdal.BuildVRT(vrt_file, tiff_l)
        # translate to tiff; each tiff is 4GB in size
        gdal.Translate(tiff_file, vrt)
        # generate compressed tiff
        small_tiff = VIIRSimage + tiff_file
        gdal.Translate(small_tiff, tiff_file,
                       options="-of GTiff -co COMPRESS=LZW -co TILED=YES")
        # remove all intermediate files
        vrt = None
        os.remove(vrt_file)
        for tif in tiff_l:
            os.remove(tif)
        logging.info("generated: " + tiff_file)
        final_tiff.append(tiff_file)
    return final_tiff
def Hire4Show3(dic):
    """Brute-force search over combinations of dic's keys for the hire set
    maximizing Good3(); prints and logs each improvement found."""
    num = 0
    counter = 0
    hire = []
    item_name = []
    for k in dic.keys():
        item_name.append(dic[k])
    # number of distinct values bounds the combination size searched
    length = len(set(item_name))
    for i in range(1, length - 3):
        for ele in itertools.combinations(dic.keys(), i):
            logging.debug('{combination}'.format(combination=','.join(ele)))
            num = Good3(ele, Talents, dic)
            if num > counter:
                hire = ele
                counter = num
                print('len(ele) is 更新:', ele, len(ele), '// num:', counter)
                # was logging.waring (typo)
                logging.warning(
                    'Optimum Solution:{hire} // fill condition:{num} // legth:{length}'
                    .format(hire=','.join(hire), num=num, length=len(hire)))
                time.sleep(3)
    print('Optimum Solution:', hire, ',fill condition:', num, ',length:', len(hire))
def getAxisTitle(variable, number, order="pt", inclusive=False):
    """Build a human-readable axis title for a jet variable.

    variable: "pt", "csv" or "deepcsv" (anything else is used verbatim).
    number: 0-based jet index (0 -> "leading", 1 -> "second", ...).
    order: jet ordering -- "pt", "csv" or "deepcsv".
    inclusive: if True the title refers to all jets instead of one.
    Returns the title string; "" when *order* is unknown.
    """
    numberDictPt = {
        0: "leading",
        1: "second",
        2: "third",
        3: "fourth",
        4: "fifth",
        5: "sixth",
        6: "seventh"
    }
    numberDictCSV = {
        0: "",
        1: "second ",
        2: "third ",
        3: "fourth ",
        4: "fifth ",
        5: "sixth ",
        6: "seventh "
    }
    nicevars = {"pt": "p_{T}", "csv": "CSV Value", "deepcsv": "DeepCSV Value"}
    if variable not in nicevars:
        # was logging.waring (typo)
        logging.warning(
            "Variables not in dict with nice names. Just using the variable")
        nicename = variable
    else:
        nicename = nicevars[variable]
    if number not in numberDictPt:
        # was logging.waring (typo)
        logging.warning(
            "Variables not in dict with nice number Just using the number")
        nicenumber = number
    else:
        if order == "pt":
            nicenumber = numberDictPt[number]
        else:
            nicenumber = numberDictCSV[number]
    if order == "pt":
        if inclusive:
            return "{0} of all jets".format(nicename)
        else:
            return "{0} of {1} jet".format(nicename, nicenumber)
    elif order == "csv":
        if inclusive:
            return "{0} of all jets ordered by CSV".format(nicename)
        else:
            return "{0} of jet with {1}highest CSV".format(nicename, nicenumber)
    elif order == "deepcsv":
        if inclusive:
            return "{0} of all jets ordered by DeepCSV".format(nicename)
        else:
            return "{0} of jet with {1}highest DeepCSV".format(nicename, nicenumber)
    else:
        logging.error("Order not defined. Returning empty sting")
        return ""
def commit(self):
    """Commit the current transaction on the thread-local connection."""
    global _db_ctx
    logging.info('commit sql stmt...')
    try:
        _db_ctx.connection.commit()
        logging.info('commit ok.')
    except:
        # was logging.waring (typo).
        # NOTE(review): the message promises a rollback but none is
        # performed here -- confirm whether the caller rolls back.
        logging.warning('commit faild. try rollback...')
def do_add_stereo_images(argv):
    """Parse CLI options and register a stereo image pair in the database."""
    try:
        opts, args = getopt.getopt(argv, 'a:b:f:r:p:t:', [
            'azimuth=', 'building=', 'focal=', 'offset=', 'rotate=',
            'position='
        ])
    except getopt.GetoptError:
        logging.exception('输入的参数不正确')
        sys.exit(1)
    azimuth = .0
    building = 'wuxin'
    offset = .0, .0, .0
    rotate = 0
    position = .0, .0, .0
    focal = None
    for o, a in opts:
        if o in ('-a', '--azimuth'):
            azimuth = float(a)
        elif o in ('-b', '--building'):
            building = a
        elif o in ('-f', '--focal'):
            focal = [float(s) for s in a.split(',')]
        elif o in ('-o', '--offset'):
            # NOTE(review): '-o' is not declared in the getopt short-option
            # string; only '--offset' can match here -- confirm intent.
            offset = [float(s) for s in a.split(',')]
        elif o in ('-r', '--rotate'):
            rotate = float(a)
        elif o in ('-p', '--position'):
            position = [float(s) for s in a.split(',')]
    try:
        filename1, filename2 = args[:2]
    # tuple unpacking of a short slice raises ValueError, not IndexError;
    # the original except clause could never fire
    except ValueError:
        logging.warning('缺少图像文件名称,必须有左右视野的两张照片')
        return
    for s in filename1, filename2:
        if not os.path.exists(s):
            logging.warning('图像文件 %s 不存在', s)
            return
    if focal is None:
        # was logging.waring (typo)
        logging.warning('缺少相机参数')
        return
    if len(args[2:]):
        logging.warning('只有第一组图像文件 %s 被处理,其他文件都会被忽略',
                        (filename1, filename2))
    regions = get_region_list()
    rindex = find_region_by_position(regions, position)
    if rindex is None:
        logging.warning('位置 %s 对应的区域在数据库中不存在', position)
        return
    add_stereo_images(building, position, azimuth, focal, rotate, offset,
                      filename1, filename2)
def downloadBreed(breed):
    """Download *breed* from mainURL into workDir, verify its hash and
    delete the file on mismatch."""
    r = requests.get(mainURL + breed, verify=False)
    logging.info('Downloading File: ' + breed)
    with open(workDir + breed, "wb") as binFile:
        binFile.write(r.content)
    if checkHash(breed):
        logging.info('Hash of ' + breed + ' OK!')
    else:
        os.remove(workDir + breed)
        # was logging.waring (typo)
        logging.warning(breed + ' Hash Fail')
def _RecordHTTPResponse(self, params): required_fields = ['requestId', 'timestamp', 'response'] for field in required_fields: if field not in params: logging.waring('HTTP Response missing required field: %s', field) return request_id = params['requestId'] assert request_id in self._initiators initiator = self._initiators[request_id] self._http_responses.append( InspectorNetworkResponseData(self, params, initiator))
async def __call__(self, request):
    """aiohttp request handler: gather keyword arguments from the request
    body (POST), query string (GET) and match_info, then invoke the
    wrapped handler function."""
    kw = None
    if self._has_var_kw_arg or self._has_named_kw_args or self._required_kw_args:
        if request.method == 'POST':
            if not request.content_type:
                return web.HTTPBadRequest('Missing Content-Type.')
            ct = request.content_type.lower()
            if ct.startswith('application/json'):
                params = await request.json()
                if not isinstance(params, dict):
                    return web.HTTPBadRequest('JSON body must be object.')
                kw = params
            elif ct.startswith('application/x-www-form-urlencoded'
                               ) or ct.startswith('multipart/form-data'):
                params = await request.post()
                kw = dict(**params)
            else:
                return web.HTTPBadRequest('unsupported Content-Type:%s ' % request.content_type)
        if request.method == 'GET':
            qs = request.query_string
            if qs:
                kw = dict()
                for k, v in parse.parse_qs(qs, True).items():
                    kw[k] = v[0]
    if kw is None:
        kw = dict(**request.match_info)
    else:
        if not self._has_var_kw_arg and self._named_kw_args:
            # remove all unamed kw:
            copy = dict()
            for name in self._named_kw_args:
                if name in kw:
                    copy[name] = kw[name]
            kw = copy
        # check named arg:
        for k, v in request.match_info.items():
            if k in kw:
                # was logging.waring (typo)
                logging.warning(
                    'Duplicate arg name in named arg and kw arg:%s' % k)
            kw[k] = v
    if self._has_request_arg:
        kw['request'] = request
    # check required kw:
    if self._required_kw_args:
        for name in self._required_kw_args:
            if not name in kw:
                return web.HTTPBadRequest('Missing argument: %s ' % name)
    logging.info('call with args:%s' % str(kw))
    try:
        r = await self._func(**kw)
        return r
    except APIError as e:
        return dict(error=e.error, data=e.data, message=e.message)
def __init__(self, verbose=0, nBuffer=3):
    """verbose: 0 silent, 1 verbose, 2 very verbose.
    nBuffer: number of buffered marker coordinate sets (minimum 3)."""
    self.verbose = verbose
    # number of buffered marker coordinate set
    if nBuffer >= 3:
        self.nBuffer = nBuffer
    else:
        # was logging.waring (typo); also actually fall back to 3 as the
        # message promises -- the original never set the attribute here
        logging.warning("illegal number for nBuffer: {}, using nBuffer=3".format(nBuffer))
        self.nBuffer = 3
    self.sock = 0
    self.stoppingStream = False
    self.win32TimerOffset = time.time()
    # NOTE(review): time.clock() was removed in Python 3.8; use
    # time.perf_counter() when porting -- confirm target Python version.
    time.clock()  # start time.clock
def pointLineList(linelistP):
    """Get a list of lists of tuples from a JSON string.

    Those lists represent lines with control points.

    >>> pointLineList('[[{"x":606,"y":411,"time":33}, {"x":605,"y":411,"time":35}, {"x":605,"y":412,"time":39}]]')
    [[{'x': 606, 'y': 411, 'time': 33}, {'x': 605, 'y': 411, 'time': 35}, {'x': 605, 'y': 412, 'time': 39}]]
    """
    global logging
    linelist = json.loads(linelistP)
    if len(linelist) == 0:
        # was logging.waring (typo)
        logging.warning("Pointlist was empty. Search for '" + linelistP +
                        "' in `wm_raw_draw_data`.")
    return linelist
def getHistoMean(inputfile, histoname):
    """Return the mean of histogram *histoname* in *inputfile*.

    Returns 0 (and logs an error) when the histogram is absent; warns when
    the stored mean is exactly zero.
    """
    logging.debug("Getting mean form histogram: {0}".format(histoname))
    if isHistoinFile(inputfile, histoname):
        h = inputfile.Get(histoname)
        mean = h.GetMean()
        logging.debug("Mean of histogram {0} --> {1}".format(histoname, mean))
        if mean == 0:
            # was logging.waring (typo)
            logging.warning(
                "Mean of histogram {0} in file {1} is Zero! Please Check.".
                format(histoname, inputfile))
    else:
        mean = 0
        logging.error("Histogram not in file! Please check.")
    return mean
def 鏡頭(讀者, 鏡頭符號, 內容):
    """Apply a camera directive: '+' creates a shot from the YAML payload,
    '-' removes one; either way the reader is advanced afterwards."""
    if 鏡頭符號 == '+':
        try:
            # SECURITY NOTE(review): yaml.load without an explicit Loader is
            # unsafe on untrusted input; consider yaml.safe_load.
            d = yaml.load(內容)
            鏡頭.生成鏡頭(d)
        except:
            logging.warning(f'鏡頭「{內容}」的內容不正確。')
        讀者.步進()
    elif 鏡頭符號 == '-':
        try:
            鏡頭.解除鏡頭(yaml.load(內容))
        except:
            # was logging.waring (typo)
            logging.warning(f'鏡頭「{內容}」的內容不正確。')
        讀者.步進()
def saveIni(self):
    """ write options to ini """
    logging.debug(__name__ + ": saveIni")
    if not self.plugin():
        # was logging.waring (typo)
        logging.warning(self.__class__.__name__ +
                        ": saveIni() - No plugin set. Aborting...")
        return
    ini = self.plugin().application().ini()
    if not ini.has_section("view"):
        ini.add_section("view")
    if self.currentCenterViewClassId():
        ini.set("view", "CurrentView", self.currentCenterViewClassId())
    if hasattr(self.centerView(), "boxContentScript"):
        ini.set("view", "box content script",
                self.centerView().boxContentScript())
    self.plugin().application().writeIni()
def pointLineList(linelistP):
    """Get a list of lists of tuples from a JSON string.

    Those lists represent lines with control points.

    >>> pointLineList('[[{"x":606,"y":411,"time":33}, {"x":605,"y":411,"time":35}, {"x":605,"y":412,"time":39}]]')
    [[{'x': 606, 'y': 411, 'time': 33}, {'x': 605, 'y': 411, 'time': 35}, {'x': 605, 'y': 412, 'time': 39}]]
    """
    global logging
    linelist = json.loads(linelistP)
    if len(linelist) == 0:
        # was logging.waring (typo)
        logging.warning(
            "Pointlist was empty. Search for '" + linelistP +
            "' in `wm_raw_draw_data`."
        )
    return linelist
def __init__(self, verbose=0, nBuffer=3):
    """verbose: 0 silent, 1 verbose, 2 very verbose.
    nBuffer: number of buffered marker coordinate sets (minimum 3)."""
    self.verbose = verbose
    # number of buffered marker coordinate set
    if nBuffer >= 3:
        self.nBuffer = nBuffer
    else:
        # was logging.waring (typo); also actually fall back to 3 as the
        # message promises -- the original never set the attribute here
        logging.warning(
            "illegal number for nBuffer: {}, using nBuffer=3".format(
                nBuffer))
        self.nBuffer = 3
    self.sock = 0
    self.stoppingStream = False
    self.win32TimerOffset = time.time()
    # NOTE(review): time.clock() was removed in Python 3.8; use
    # time.perf_counter() when porting -- confirm target Python version.
    time.clock()  # start time.clock
def create_tf_record(output_filename, file_pars):
    """Serialize (data, label) pairs into a TFRecord file at *output_filename*.

    Pairs for which dict_to_tf_example returns None or raises ValueError
    are skipped with a warning.
    """
    writer = tf.python_io.TFRecordWriter(output_filename)
    try:
        for data, label in file_pars:
            try:
                tf_example = dict_to_tf_example(data, label)
                if tf_example is None:
                    continue
                writer.write(tf_example.SerializeToString())
            except ValueError:
                # was logging.waring (typo)
                logging.warning('Invalid example: %s or %s, ignoring.' % (data, label))
    finally:
        # ensure the writer is closed even if iteration raises
        writer.close()
def getEffectivePortHeight(self, port):
    """ Returns the bigger value of the source height and the height of the port name text field.
    """
    portHeight = port.height()
    if not self._showPortNames:
        return portHeight

    titleHeight = port.titleField().getHeight() * self.scale()

    if self._portNamesPosition == self.PORT_NAMES_NEXT_TO_PORTS:
        return max(portHeight, titleHeight)
    elif self._portNamesPosition == self.PORT_NAMES_ABOVE_PORTS:
        return portHeight + titleHeight
    # was logging.waring (typo)
    logging.warning(self.__class__.__name__ + ": getEffectivePortHeight() - " +
                    self.NO_VALID_PORT_NAMES_POSITION_MESSAGE)
    return 0
def process(input, output):
    """
    Perform advanced image process on image at input and write result to output
    (rotates the image 90 degrees). Logs a warning on unreadable or missing files.
    """
    try:
        image = Image.open(input)
        new_image = image.rotate(90, expand=True)
        new_image.save(output)
    except UnidentifiedImageError:
        logging.warning('File is not an image')
    except ValueError:
        logging.warning('Can not read file')
    except FileNotFoundError:
        # was logging.waring (typo)
        logging.warning('File not found')
def saveIni(self):
    """ write options to ini """
    logging.debug(__name__ + ": saveIni")
    if not self.plugin():
        # was logging.waring (typo)
        logging.warning(self.__class__.__name__ +
                        ": saveIni() - No plugin set. Aborting...")
        return
    ini = self.plugin().application().ini()
    if not ini.has_section("view"):
        ini.add_section("view")
    if self.currentCenterViewClassId():
        ini.set("view", "CurrentView", self.currentCenterViewClassId())
    if hasattr(self.centerView(), "boxContentScript"):
        ini.set("view", "box content script",
                self.centerView().boxContentScript())
    self.plugin().application().writeIni()
def __new__(cls, name, bases, attrs):
    """Metaclass hook (Python 2): collect Field mappings from the class
    attributes, validate the primary key and attach ORM metadata
    (__table__, __mappings__, __primary_key__, __sql__, triggers)."""
    # skip base model class:
    if name == 'Model':
        return type.__new__(cls, name, bases, attrs)
    # store all subclasses info:
    if not hasattr(cls, 'subclasses'):
        cls.subclasses = {}
    if not name in cls.subclasses:
        # NOTE(review): stores the class *name*, not the class object -- confirm.
        cls.subclasses[name] = name
    else:
        logging.warning('Redefine class: %s' % name)
    logging.info('Scan ORMapping %s...' % name)
    mappings = dict()
    primary_key = None
    for k, v in attrs.iteritems():
        if isinstance(v, Field):
            if not v.name:
                v.name = k
            logging.info('Found mapping: %s => %s' % (k, v))
            # check duplicate primary key:
            if v.primary_key:
                if primary_key:
                    raise TypeError('Cannot define more than 1 primary key in class: %s' % name)
                if v.updatable:
                    logging.warning('NOTE: change primary key to non-updatable.')
                    v.updatable = False
                if v.nullable:
                    # was logging.waring (typo)
                    logging.warning('NOTE: change primary key to non-nullable.')
                    v.nullable = False
                primary_key = v
            mappings[k] = v
    # check exist of primary key:
    if not primary_key:
        raise TypeError('Primary key not defined in class: %s', name)
    for k in mappings.iterkeys():
        attrs.pop(k)
    if not '__table__' in attrs:
        attrs['__table__'] = name.lower()
    attrs['__mappings__'] = mappings
    attrs['__primary_key__'] = primary_key
    attrs['__sql__'] = lambda self: _gen_sql(attrs['__table__'], mappings)
    for trigger in _triggers:
        if not trigger in attrs:
            attrs[trigger] = None
    return type.__new__(cls, name, bases, attrs)
def interface():
    """This function determines what happens when the user is directed to the / part of the application"""
    create_log()  # Here the log file is opened or created
    try:
        scheduler.run(blocking=False)
        return ('<strong> Hello before you use the app some of your preferences '
                'need to be taken to give you breifings personalised to your needs <strong>'
                '<form action="/index" method="get">'
                '<label for="newspaper"> Please enter a news website:</label>'
                '<input type="text" name="newspaper">'
                '<label for="location"> Please enter your location:</label>'
                '<input type="text" name="location">'
                '<input type="submit">'
                '</form>')
    except:
        # was logging.waring (typo)
        logging.warning('An error has occured in the program')
        raise RuntimeError
def save(self):
    """Insert one row of random test data into testtable; raise and log a
    warning if the affected row count is not exactly 1."""
    try:
        sql = "insert into testtable(time, name, type, data) values('%s','%s', '%s', '%s')" % (
            datetime.datetime.now(), self.random_str(), self.random_str(),
            self.random_str())
        self.cursor.execute(sql)
        rs = self.cursor.rowcount
        # If the number of affected rows is not 1, raise an exception.
        # (comment translated from Chinese)
        if rs != 1:
            raise Exception("Error of data inserting.")
        # NOTE(review): rollback immediately before commit undoes the
        # insert -- confirm intended behavior.
        self.conn.rollback()
        self.conn.commit()
    except Exception as e:
        # was logging.waring (typo)
        logging.warning(e)
    else:
        # NOTE(review): successful SQL is logged at warning level -- confirm.
        logging.warning(sql)
def log(text: str, level: str) -> None:
    """logging using a central function

    Args:
        text (str): logging text
        level (str): logging level (falls back to INFO for unknown levels)
    """
    if level == Logging.ERROR.value:
        logging.error(text)
    elif level == Logging.DEBUG.value:
        logging.debug(text)
    elif level == Logging.WARNING.value:
        # was logging.waring (typo)
        logging.warning(text)
    else:
        logging.info(text)
    if text and CFG.on_screen_print:
        print(text)
def run_Dspot_preconfig(POM_FILE, reposlug, timecap):
    """Run the DSpot maven plugin on the cloned repo and push its output
    files to MongoDB (when connected).

    Returns True when DSpot ran, False when the project does not support it.
    """
    # This is fixed if running maven Dspot plugin
    outputdir = "target/dspot/output"
    # If no pomfile found in the project root or no dspot plugin then it does
    # not support Dspot
    if not (os.path.isfile(POM_FILE) and check_Dspot_supported(POM_FILE)):
        return False
    logging.warning("PROJECT DOES SUPPORT DSPOT")
    logging.warning(
        exec_get_output([
            'mvn', '-f', 'clonedrepo', 'dspot:amplify-unit-tests',
            '-Dtest-criterion=TakeAllSelector', '-Diteration=1',
            '-Ddescartes', '-Dgregor'
        ]))
    # move files and cleanup after running
    exec_get_output(
        'mv -t ' + outputdir + ' project.properties debug.log 2>/dev/null',
        True)
    # push to Mongodb when done
    if mongo_connected:
        db = client['Dspot']
        colname = reposlug.split('/')[1] + 'RootProject' + '-' + timecap
        col = db[colname]
        # get all output files but the binaries .class files
        files = exec_get_output(
            'find ' + outputdir + ' -type f | grep -v .class',
            True).rstrip().split('\n')
        for file in files:
            # read the entire contents, should be UTF-8 text
            with open(file) as f:
                text = f.read()
            file_name = file.split('/')[-1]
            logging.warning('File to Mongodb: ' + file_name)
            text_file_doc = {"file_name": file_name, "contents": text}
            col.insert(text_file_doc)
    else:
        # was logging.waring (typo)
        logging.warning("Nothing will be submit since mongodb is not connected")
    exec_get_output('cp -rf ' + outputdir + ' clonedrepo', True)
    exec_get_output('rm -rf NUL target/ clonedrepo/target', True)
    return True  # Dspot was preconfigured
def get_headers(self):
    """Issue a GET for self.url (Python 2 httplib) and cache the response
    status, headers and request date on the instance."""
    if not self.status:
        self.urlparsed = urlparse(self.url)
        self.date = datetime.today()
        h = httplib.HTTPConnection(self.urlparsed.netloc)
        try:
            if self.urlparsed.query != '':
                uri = ''.join([self.urlparsed.path, '?', self.urlparsed.query])
            else:
                uri = self.urlparsed.path
            h.request('GET', uri)
            res = h.getresponse()
            self.__parse_headers(res.getheaders())
            self.status = res.status
        except socket.gaierror:
            logging.warning('No response from server : %s', self.urlparsed.netloc)
        except httplib.InvalidURL:
            # was logging.waring (typo)
            logging.warning('Invalid URL : %s', self.url)
        except:
            logging.warning('Unexpected error')
            raise
def sizeHint(self):
    """ Returns size needed to draw widget's content.
    """
    # arrangePorts() needed because it will be called in rearnangeContent() after sizeHint()
    self.arrangePorts()

    neededWidth = self.getDistance('leftMargin', 1) + self.getDistance('rightMargin', 1)
    neededHeight = self.getDistance('topMargin', 1) + self.getDistance('bottomMargin', 1)
    imageSizeF = self.imageSizeF()

    # width
    titleWidth = 0
    if self.titleIsSet():
        titleWidth = self.getDistance('titleFieldWidth', 1)
    bodyWidth = 0
    sinkPortsWidth = 0
    sourcePortsWidth = 0
    if len(self.sinkPorts()) > 0:
        sinkPortsWidth = self.getDistance('leftMargin', 1) + PortWidget.WIDTH
    if len(self.sourcePorts()) > 0:
        sourcePortsWidth = self.getDistance('rightMargin', 1) + PortWidget.WIDTH
    if self._showPortNames:
        maxSinkTitleWidth = self._getMaxSinkTitleWidth()
        maxSourceTitleWidth = self._getMaxSourceTitleWidth()
        if self._portNamesPosition == self.PORT_NAMES_NEXT_TO_PORTS:
            bodyWidth += maxSinkTitleWidth + self.getDistance('rightMargin', 1) + maxSourceTitleWidth
        elif self._portNamesPosition == self.PORT_NAMES_ABOVE_PORTS:
            if maxSinkTitleWidth > PortWidget.WIDTH:
                sinkPortsWidth = 0  #self.getDistance('leftMargin', 1)
            if maxSourceTitleWidth > PortWidget.WIDTH:
                sourcePortsWidth = 0  #self.getDistance('rightMargin', 1)
            bodyWidth += maxSinkTitleWidth + maxSourceTitleWidth
        else:
            # was logging.waring (typo)
            logging.warning(self.__class__.__name__ + ": sizeHint() - " + self.NO_VALID_PORT_NAMES_POSITION_MESSAGE)
    bodyWidth += sinkPortsWidth + sourcePortsWidth
    if self.textFieldIsSet():
        bodyWidth += self.getDistance('textFieldWidth', 1)
    bodyWidth = max(imageSizeF.width() + self.getDistance("leftMargin", 1) + self.getDistance("rightMargin", 1), bodyWidth)
    neededWidth += max(titleWidth, bodyWidth)

    # height
    if self.titleIsSet():
        neededHeight += self.getDistance('titleFieldHeight', 1)
    sinkPortsHeight = self.getPortsHeight("sink") / self.scale()
    sourcePortsHeight = self.getPortsHeight("source") / self.scale()
    textFieldHeight = 0
    if self.textFieldIsSet():
        textFieldHeight += self.textField().getHeight()
    neededHeight += max(sinkPortsHeight, sourcePortsHeight, textFieldHeight, imageSizeF.height())
    if bodyWidth != 0:
        neededHeight += self.getDistance('bottomMargin', 1)  # gap between header and body
    if self._showPortNames and (len(self.sinkPorts()) > 1 or len(self.sourcePorts()) > 1):
        neededHeight += self.getDistance('bottomMargin', 1)  # additional gap for port names
    return QSize(neededWidth, neededHeight)
def sizeHint(self):
    """ Returns size needed to draw widget's content.
    """
    # arrangePorts() needed because it will be called in rearnangeContent() after sizeHint()
    self.arrangePorts()

    neededWidth = self.getDistance('leftMargin', 1) + self.getDistance(
        'rightMargin', 1)
    neededHeight = self.getDistance('topMargin', 1) + self.getDistance(
        'bottomMargin', 1)
    imageSizeF = self.imageSizeF()

    # width
    titleWidth = 0
    if self.titleIsSet():
        titleWidth = self.getDistance('titleFieldWidth', 1)
    bodyWidth = 0
    sinkPortsWidth = 0
    sourcePortsWidth = 0
    if len(self.sinkPorts()) > 0:
        sinkPortsWidth = self.getDistance('leftMargin', 1) + PortWidget.WIDTH
    if len(self.sourcePorts()) > 0:
        sourcePortsWidth = self.getDistance('rightMargin', 1) + PortWidget.WIDTH
    if self._showPortNames:
        maxSinkTitleWidth = self._getMaxSinkTitleWidth()
        maxSourceTitleWidth = self._getMaxSourceTitleWidth()
        if self._portNamesPosition == self.PORT_NAMES_NEXT_TO_PORTS:
            bodyWidth += maxSinkTitleWidth + self.getDistance(
                'rightMargin', 1) + maxSourceTitleWidth
        elif self._portNamesPosition == self.PORT_NAMES_ABOVE_PORTS:
            if maxSinkTitleWidth > PortWidget.WIDTH:
                sinkPortsWidth = 0  #self.getDistance('leftMargin', 1)
            if maxSourceTitleWidth > PortWidget.WIDTH:
                sourcePortsWidth = 0  #self.getDistance('rightMargin', 1)
            bodyWidth += maxSinkTitleWidth + maxSourceTitleWidth
        else:
            # was logging.waring (typo)
            logging.warning(self.__class__.__name__ + ": sizeHint() - " +
                            self.NO_VALID_PORT_NAMES_POSITION_MESSAGE)
    bodyWidth += sinkPortsWidth + sourcePortsWidth
    if self.textFieldIsSet():
        bodyWidth += self.getDistance('textFieldWidth', 1)
    bodyWidth = max(
        imageSizeF.width() + self.getDistance("leftMargin", 1) +
        self.getDistance("rightMargin", 1), bodyWidth)
    neededWidth += max(titleWidth, bodyWidth)

    # height
    if self.titleIsSet():
        neededHeight += self.getDistance('titleFieldHeight', 1)
    sinkPortsHeight = self.getPortsHeight("sink") / self.scale()
    sourcePortsHeight = self.getPortsHeight("source") / self.scale()
    textFieldHeight = 0
    if self.textFieldIsSet():
        textFieldHeight += self.textField().getHeight()
    neededHeight += max(sinkPortsHeight, sourcePortsHeight, textFieldHeight,
                        imageSizeF.height())
    if bodyWidth != 0:
        neededHeight += self.getDistance('bottomMargin', 1)  # gap between header and body
    if self._showPortNames and (len(self.sinkPorts()) > 1 or len(self.sourcePorts()) > 1):
        neededHeight += self.getDistance(
            'bottomMargin', 1)  # additional gap for port names
    return QSize(neededWidth, neededHeight)
def _profiling(start,sql=''): t = time.time() - start if t > 0.1: logging.waring('[PROFILING][DB]%s:%s'%(t,sql)) else: logging.info('[PROFILING][DB]%S:%S'%(t,sql))
def do_add_image(argv):
    """Parse CLI options and register a single image in the database."""
    try:
        opts, args = getopt.getopt(argv, 'a:b:c:f:m:p:r:s:t:', [
            'azimuth=', 'building=', 'distance=', 'focal=', 'mask=',
            'position=', 'region=', 'ssize='
        ])
    except getopt.GetoptError:
        logging.exception('输入的参数不正确')
        sys.exit(1)
    azimuth = .0
    building = 'wuxin'
    distance = .0
    mask = None
    position = .0, .0, .0
    focal = None
    cmos = 3.6, 4.8
    rindex = None
    for o, a in opts:
        if o in ('-a', '--azimuth'):
            azimuth = float(a)
        elif o in ('-b', '--building'):
            building = a
        elif o in ('-f', '--focal'):
            focal = float(a)
        elif o in ('-m', '--mask'):
            mask = [int(s) for s in a.split(',')]
        elif o in ('-p', '--position'):
            position = [float(s) for s in a.split(',')]
        elif o in ('-t', '--distance'):
            distance = float(a)
        elif o in ('-r', '--region'):
            rindex = int(a)
        elif o in ('-s', '--ssize'):
            cmos = [float(s) for s in a.split(',')]
    try:
        filename = args[0]
    except IndexError:
        logging.warning('缺少图像文件名称')
        return
    if not os.path.exists(filename):
        logging.warning('图像文件 %s 不存在', filename)
        return
    if focal is None:
        # was logging.waring (typo)
        logging.warning('缺少相机参数')
        return
    if len(args[1:]):
        logging.warning('只有第一个图像文件 %s 被处理,其他文件都会被忽略', filename)
    if rindex is None:
        regions = get_region_list()
        rindex = find_region_by_position(regions, position)
        if rindex is None:
            logging.warning('位置 %s 对应的区域在数据库中不存在', position)
            return
    # normalize the focal length by the CMOS sensor dimensions
    focal = [focal / x for x in cmos]
    add_image(building, position, azimuth, focal, mask, distance, filename)
def 步進(self, 防止終焉=False):
    """Advance the script reader by one statement and update self.狀態
    (dialogue, speaker, extra info) according to the statement type.

    防止終焉: when True, an end-of-script statement returns '終焉'
    instead of updating the state.
    """
    if self.狀態.選項:
        # a choice is pending; do not advance
        return
    s = self.下一句()
    類型 = s['類型']
    if 類型 in ('註釋', '躍點'):
        # comments and jump points are skipped
        self.步進()
    if 類型 == '函數調用':
        命令(s).執行(self)
        if not self.狀態.選項:
            self.步進()
    if 類型 == '插入圖':
        logging.debug('插入圖: %s' % s['插入圖'])
        self.狀態.額外信息 = ('cut', s['插入圖'])
    if 類型 == '終焉':
        if 防止終焉:
            return '終焉'
        self.狀態.額外信息 = ('終焉', )
        self.狀態.話語 = s['旁白']
        self.狀態.名字 = ''
        self.狀態.語者 = ''
    if 類型 == '鏡頭':
        if s['鏡頭'] == '+':
            try:
                # NOTE(review): yaml.load without a Loader is unsafe on
                # untrusted input; consider yaml.safe_load.
                d = yaml.load(s['內容'])
                鏡頭.生成鏡頭(d)
            except:
                logging.warning(f'鏡頭「{s["內容"]}」的內容不正確。')
            self.步進()
        elif s['鏡頭'] == '-':
            try:
                鏡頭.解除鏡頭(yaml.load(s['內容']))
            except:
                # was logging.waring (typo)
                logging.warning(f'鏡頭「{s["內容"]}」的內容不正確。')
            self.步進()
    if 類型 == '旁白':
        self.狀態.話語 = s['旁白']
        self.狀態.名字 = ''
        self.狀態.語者 = ''
    if 類型 == '人物操作':
        人物名 = s['人物名']
        目標 = s['目標']
        if s['操作符'] == '+':
            角色.取角色(人物名).現衣 = s['目標']
        if s['操作符'] == '|':
            角色.取角色(人物名).顯示名字 = s['目標']
        self.步進()
    if 類型 == '人物對話':
        人物 = 角色.取角色(s['名'])
        人物.現顏 = s['顏']
        人物.現特效 = s['特效']
        替代顯示名字 = 人物.顯示名字
        if 鏡頭.查詢(s['名']) and self.狀態.人物 != s['名']:
            self.狀態.人物 = s['名']
        self.狀態.話語 = s['語']
        self.狀態.名字 = s['代'] or 替代顯示名字 or s['名']
        self.狀態.語者 = s['名']
        logging.debug([s['名'], s['代'], s['顏'], s['語']].__str__())
    if 類型 == '人物表情':
        人物 = 角色.取角色(s['名'])
        人物.現顏 = s['顏']
        人物.現特效 = s['特效']
        if 鏡頭.查詢(s['名']) and self.狀態.人物 != s['名']:
            self.狀態.人物 = s['名']
        logging.debug([s['名'], s['代'], s['顏']].__str__())
        self.步進()
def setFilterMsg(filterIndex, filterId, filterMask, frameType, filterStatus):
    """Build the serial frame that configures CAN filter *filterIndex*.

    filterId / filterMask: raw filter values (11-bit std / 29-bit ext).
    frameType: "std" or "ext".  filterStatus: "enable" or "disable".
    Returns the byte list to send, or [] on invalid arguments.
    """
    if (filterIndex > 15):
        # was logging.waring (typo)
        logging.warning("filterIndex {} is too large.".format(filterIndex))
        return []
    if (frameType.lower() != "std" and frameType.lower() != "ext"):
        # was logging.waring (typo)
        logging.warning("frameType {} is invalid.".format(frameType))
        return []
    if (filterStatus.lower() != "enable" and filterStatus.lower() != "disable"):
        # was logging.waring (typo)
        logging.warning("filterStatus {} is invalid.".format(filterStatus))
        return []
    sendData = [0xAA, 0xAA]
    id = 0xE0 + filterIndex
    sendData.append(id)
    crc = id
    id = 0xFE
    crc = crc + id
    sendData.append(id)
    id = 0xFF
    crc = crc + id
    sendData.append(id)
    id = 0x01
    crc = crc + id
    sendData.append(id)
    idStr = "{:08x}".format(filterId)
    if (frameType.lower() == "std"):
        id = int(idStr[6:8], 16)
        sendData = insertCtrl(sendData, id)
        sendData.append(id)
        crc = crc + id
        id = int(idStr[4:6], 16) & 0x7
        sendData = insertCtrl(sendData, id)
        sendData.append(id)
        crc = crc + id
        sendData.append(0)
        id = 0  # Disable
        if (filterStatus.lower() == "enable"):
            id = 0x80  # Enable
        sendData.append(id)
        crc = crc + id
        logging.debug("id={:02x}{:02x}".format(sendData[2], sendData[3]))
    else:
        id = int(idStr[6:8], 16)
        sendData = insertCtrl(sendData, id)
        sendData.append(id)
        crc = crc + id
        id = int(idStr[4:6], 16)
        sendData = insertCtrl(sendData, id)
        sendData.append(id)
        crc = crc + id
        id = int(idStr[2:4], 16)
        sendData = insertCtrl(sendData, id)
        sendData.append(id)
        crc = crc + id
        id = int(idStr[0:2], 16) & 0x1F
        # NOTE(review): the enable flag *replaces* the high id bits instead
        # of OR-ing them in -- confirm against the device protocol.
        if (filterStatus.lower() == "enable"):
            id = 0x80  # Enable
        sendData = insertCtrl(sendData, id)
        sendData.append(id)
        crc = crc + id
        logging.debug("id={:02x}{:02x}".format(sendData[2], sendData[3]))
    filterMaskStr = "{:08x}".format(filterMask)
    mask = int(filterMaskStr[6:8], 16)
    sendData = insertCtrl(sendData, mask)
    sendData.append(mask)
    crc = crc + mask
    mask = int(filterMaskStr[4:6], 16)
    sendData = insertCtrl(sendData, mask)
    sendData.append(mask)
    crc = crc + mask
    mask = int(filterMaskStr[2:4], 16)
    sendData = insertCtrl(sendData, mask)
    sendData.append(mask)
    crc = crc + mask
    mask = int(filterMaskStr[0:2], 16) & 0x1F
    if (frameType.lower() == "ext"):
        mask = mask + 0x40  # Extended
    sendData = insertCtrl(sendData, mask)
    sendData.append(mask)
    crc = crc + mask
    dataLen = 8  # renamed from 'len' -- it shadowed the builtin
    sendData.append(dataLen)  # Frame Data Length
    crc = crc + dataLen
    req = 0xFF
    sendData.append(req)
    crc = crc + req
    ext = 1
    sendData.append(ext)  # Standard/Extended frame
    crc = crc + ext
    rtr = 0  # Set
    sendData.append(rtr)  # Set/Read
    crc = crc + rtr
    crc = crc & 0xff
    logging.debug("crc={:2x}".format(crc))
    sendData = insertCtrl(sendData, crc)
    sendData.append(crc)
    sendData.append(0x55)
    sendData.append(0x55)
    logging.debug("sendData={}".format(sendData))
    return sendData
def getPageData (url, lotto, set): try: # Follow URL response = urllib2.urlopen (url) # GET HTML page data html = response.read () # Spruce up the soup parsedHTML = BeautifulSoup (html, "lxml") return (parsePageData (parsedHTML, lotto, set)) except urllib2.HTTPError, e: # HTTP Error logging.warning ("HTTPError = %s" % str(e.code)) return (None) except urllib2.URLError, e: # URL Error logging.waring ("URLError = %s" % str(e.code)) return (None) except Exception, e: logging.warning ("Something happened: %s" % e) return (None) #endregion Web #region Console Control #region DS ##################################################################################################################### ## Asks the user which of the state's valid years they would like to download lotto data from. ## ## ## ## -> LottoSource ls : Data regarding the chosen state (name, link, year list). ## ## ## ## <- Tuple (String WEB_PATH, : Constructed string link for connecting to the requested page. ## ## LottoSet) : Lotto set data to be sent to be filled with number data ##
def do_add_depth_image(argv): try: opts, args = getopt.getopt(argv, 'a:b:d:f:o:p:r:', [ 'azimuth=', 'building=', 'depth=', 'focal=', 'offset=', 'position=', 'rotate=' ]) except getopt.GetoptError: logging.exception('输入的参数不正确') sys.exit(1) azimuth = .0 building = 'wuxin' depth_filename = None position = .0, .0, .0 offset = .0, .0, .0 rotate = 0 focal = None for o, a in opts: if o in ('-a', '--azimuth'): azimuth = float(a) elif o in ('-b', '--building'): building = a elif o in ('-d', '--depth'): depth_filename = a elif o in ('-f', '--focal'): focal = [float(s) for s in a.split(',')] elif o in ('-o', '--offset'): offset = [float(s) for s in a.split(',')] elif o in ('-p', '--position'): position = [float(s) for s in a.split(',')] elif o in ('-r', '--rotate'): rotate = float(a) try: filename = args[0] except IndexError: logging.warning('缺少图像文件名称') return if not os.path.exists(filename): logging.warning('图像文件 %s 不存在', filename) return if depth_filename is None: logging.warning('缺少深度图像文件') return if not os.path.exists(depth_filename): logging.warning('深度图像文件 %s 不存在', depth_filename) return if focal is None: logging.waring('缺少相机参数') return regions = get_region_list() rindex = find_region_by_position(regions, position) if rindex is None: logging.warning('位置 %s 对应的区域在数据库中不存在', position) return add_depth_image(building, position, azimuth, focal, rotate, offset, depth_filename, filename)
def doBalance(self):
    """Rebalance the total stock position back toward the initial holding.

    Cancels all open orders, measures the stock drift against the initial
    balance, and if the drift exceeds minAmount places a limit/market order
    on the best-priced exchange pair to close the gap. Aborts trading
    (sys.exit) on an abnormal position change or when stop-loss triggers.
    """
    self._cancelAllOrder()
    self.currentTotalBalance = self.getAllBalance()
    stockDiff = self._floatFloor(self.currentTotalBalance['stocks'][
        'total'] - self.initTotalBalance['stocks']['total'], 3)
    # Position moved more than the order logic could explain — stop trading.
    if abs(stockDiff) > 1.11 * self.maxAmount:
        self.isNormal = False
        logging.warning("仓位变动异常!仓位差:%f, 停止交易", stockDiff)
        sys.exit()
    if abs(stockDiff) < self.minAmount:
        self.isBalance = True
    else:
        orderAmount = 0
        logging.info('初始币总数量:%s; 现在币总数量:%s; 差额:%s', self.initTotalBalance['stocks'][
            'total'], self.currentTotalBalance['stocks']['total'], stockDiff)
        maxBid, maxBidAmount, maxPair, minAsk, minAskAmount, minPair = self.getMaxSpread()
        if (maxPair == None) or (minPair == None) or maxBidAmount == 0 or minAskAmount == 0:
            # No hedging exchange pair was selected — nothing to do.
            return
        if (not self._isPriceNormal(maxBid)) or (not self._isPriceNormal(minAsk)):
            # Abnormal price — skip this cycle.
            logging.warning(
                'The price is unormal, jump over this cycle......')
            return
        if abs(maxPair.depth['timestamp'] - minPair.depth['timestamp']) > self.maxLagTime * 1000 or abs(time.time() * 1000 - maxPair.depth['timestamp']) > self.maxLagTime * 1000:
            # Depth snapshots too far apart in time — skip this cycle.
            logging.warning(
                'the time lag between different exchanges is bigger than 3 seconds!!!!!')
            return
        # Crossed or out-of-order book on either side — skip this cycle.
        if maxPair.depth['asks'][0][0] < maxPair.depth['bids'][0][0] or maxPair.depth['asks'][0][0] > maxPair.depth['asks'][1][0] \
                or minPair.depth['asks'][0][0] < minPair.depth['bids'][0][0] or minPair.depth['asks'][0][0] > minPair.depth['asks'][1][0]:
            logging.warning(
                'Order Book Information is unormal, jump over this cycle......')
            return
        if stockDiff > 0:
            # Holding too much stock: sell the excess on the best-bid exchange.
            orderAmount = self.adjustAmountFloor(
                min(stockDiff, maxPair.balance['stocks']['free']))
            if self.useMarketOrder:
                orderPrice = self._floatFloor(
                    maxBid * (1 - self.slideP / 100), 8)
                logging.info('仓位平衡中, 交易所%s市价卖出币数量%s, 挂单价:%s',
                             maxPair.id, orderAmount, orderPrice)
            else:
                orderPrice = self._floatFloor(maxPair.depth['asks'][0][
                    0] - (maxPair.depth['asks'][0][0] - maxPair.depth['bids'][0][0]) / self.slidingRatio, 8)
                logging.info('仓位平衡中, 交易所%s限价卖出币数量%s, 挂单价:%s',
                             maxPair.id, orderAmount, orderPrice)
            try:
                order = maxPair.exchange.createLimitSellOrder(
                    maxPair.symbol, orderAmount, orderPrice)
                logging.info('Order %s placed in %s to balance position: %s', order[
                    'id'], maxPair.id, order['info'])
            except Exception as e:
                logging.error("%s placing order Error: %s", maxPair.id, e)
        else:
            # Holding too little stock: buy the shortfall on the best-ask exchange.
            stockDiff = abs(stockDiff)
            if self.useMarketOrder:
                orderPrice = self._floatCeil(
                    minAsk * (1 + self.slideP / 100), 8)
                canBuyAmount = minPair.balance['balance'][
                    'free'] / orderPrice  # how much the exchange's free cash can buy
                orderAmount = self.adjustAmountRound(stockDiff) if (
                    canBuyAmount - stockDiff) > self.minAmount else self.adjustAmountFloor(min(stockDiff, canBuyAmount))
                logging.info('仓位平衡中, 交易所%s市价买入币数量%s, 挂单价:%s',
                             minPair.id, orderAmount, orderPrice)
            else:
                orderPrice = self._floatFloor(minPair.depth['bids'][0][
                    0] + (minPair.depth['asks'][0][0] - minPair.depth['bids'][0][0]) / self.slidingRatio, 8)
                canBuyAmount = minPair.balance['balance'][
                    'free'] / orderPrice  # how much the exchange's free cash can buy
                orderAmount = self.adjustAmountRound(stockDiff) if (
                    canBuyAmount - stockDiff) > self.minAmount else self.adjustAmountFloor(min(stockDiff, canBuyAmount))
                # NOTE(review): this overwrites the buy price just computed
                # from minPair's book with a price derived from maxPair's
                # book — looks like a copy-paste slip from the sell branch;
                # behavior preserved as-is, confirm intent before changing.
                orderPrice = self._floatFloor(maxPair.depth['asks'][0][
                    0] - (maxPair.depth['asks'][0][0] - maxPair.depth['bids'][0][0]) / self.slidingRatio, 8)
                logging.info('仓位平衡中, 交易所%s限价买入币数量%s, 挂单价:%s',
                             minPair.id, orderAmount, orderPrice)
            try:
                order = minPair.exchange.createLimitBuyOrder(
                    minPair.symbol, orderAmount, orderPrice)
                logging.info('Order %s placed in %s to balance position: %s', order[
                    'id'], minPair.id, order['info'])
            except Exception as e:
                logging.error("%s placing order Error: %s", minPair.id, e)
    if self.isBalance and self.lastOpAmount:
        currentProfit = self.getProfit()
        logging.info('Total Profit: %s; This time profit:%s, Spread: %s; Balance: %s, Stocks: %s.',
                     currentProfit, (currentProfit - self.lastProfit),
                     (currentProfit - self.lastProfit) / self.lastOpAmount,
                     self.currentTotalBalance['balance']['total'],
                     self.currentTotalBalance['stocks']['total'])
        if self.stop_when_loss and currentProfit < 0 and abs(currentProfit) > self.max_loss:
            logging.warning('交易亏损超过最大限度, 程序取消所有订单后退出!')
            self._cancelAllOrder()
            # Was logging.waring (AttributeError) — the stop-loss shutdown
            # crashed before reaching sys.exit() instead of logging cleanly.
            logging.warning('已停止!!!!')
            sys.exit()
        self.lastProfit = currentProfit
    return
def warning(msg): logging.waring('[{}] {}'.format( strftime('%Y-%m-%d %H:%M:%S', localtime()), msg))
def run_Dspot_autoconfig(reposlug, timecap):
    """Auto-configure and run DSpot on every Maven module of the cloned repo,
    then upload the amplified-test output files to MongoDB.

    Args:
        reposlug: "owner/name" GitHub slug, used to build collection names.
        timecap: timestamp string appended to each collection name.
    """
    # Find project roots.
    # Normal project with only one root; projects like repairnator have
    # several root pom.xml files.
    roots = exec_get_output(
        'find clonedrepo/ -maxdepth 2 -mindepth 0 -type f -name "pom.xml"',
        True).rstrip().split('\n')
    if isinstance(roots, str):
        roots = [roots]
    # Was `roots == ''`, which is never true after split('\n'): an empty
    # find result yields [''] — compare against that instead.
    if roots == ['']:
        logging.warning('Unsupported project structure')
        exit(1)
    # Find the modules belonging to each root; the first path in every
    # sorted list is the root itself.
    paths = []
    for root in roots:
        # This needs shell=True or the mvn pipeline will not work.
        paths.append(
            sorted(
                exec_get_output(
                    'mvn -q -f ' + root +
                    ' --also-make exec:exec -Dexec.executable="pwd"',
                    True).rstrip().split('\n')))
    # dspot.properties is the standard configuration form for DSpot; write
    # one per module and run DSpot against it.
    for listln in paths:
        root_path = listln[0]
        root_name = root_path.split('/')[-1]
        project_path = exec_get_output('realpath --relative-to=. ' + root_path,
                                       True).strip()
        # Defaults when this is not a multi-module project.
        module_path = '.'
        module_name = ''
        logging.warning("rootname: " + root_name + " rootpath: " + root_path)
        # Index 0 is the root path itself, so >1 entries means multi-module.
        if len(listln) > 1:
            logging.warning("MULTI PROJECTS FOUND")
            for i in range(1, len(listln)):
                # Skip modules without tests (nothing to amplify).
                module_name = (listln[i]).split('/')[-1]
                module_path = exec_get_output(
                    'realpath --relative-to=' + root_path + ' ' + listln[i],
                    True)
                logging.warning("Running Dspot on rootname: " + root_name +
                                " rootpath: " + module_name)
                if os.path.exists(listln[i] + '/src/test/java'):
                    outputdir = 'dspot-out/' + root_name + '_' + module_name + '/'
                    print('project_path: ' + project_path)
                    print('module_path: ' + module_path)
                    JAVA_VERSION = find_JAVA_VERSION(project_path + '/pom.xml')
                    configure(module_name, module_path, root_name,
                              project_path, outputdir, JAVA_VERSION)
                    logging.warning('Running Dspot')
                    logging.warning(
                        exec_get_output([
                            'java', '-jar',
                            'dspot-2.1.0-jar-with-dependencies.jar',
                            '--path-to-properties', 'project.properties',
                            '--test-criterion', 'TakeAllSelector',
                            '--iteration', '1', '--descartes', '--gregor'
                        ]))
                    # Move the properties file into outputdir when done,
                    # then clean up after the run.
                    exec_get_output(
                        'mv -t ' + outputdir +
                        ' project.properties debug.log 2>/dev/null', True)
                    exec_get_output('rm -rf NUL target/', True)
                else:
                    logging.warning(root_name + " module: " + module_name +
                                    " ignored since no tests were found")
        else:
            # Single-module project: run on the root if it has tests.
            if os.path.exists(root_path + '/src/test/java'):
                # Message said "Dpot" — fixed typo.
                logging.warning("Running Dspot on rootname: " + root_name)
                outputdir = 'clonedrepo/dspot-out/RootProject'
                JAVA_VERSION = find_JAVA_VERSION(project_path + '/pom.xml')
                configure(module_name, module_path, root_name, project_path,
                          outputdir, JAVA_VERSION)
                logging.warning('Running Dspot')
                logging.warning(
                    exec_get_output([
                        'java', '-jar',
                        'dspot-2.1.0-jar-with-dependencies.jar',
                        '--path-to-properties', 'project.properties',
                        '--test-criterion', 'TakeAllSelector',
                        '--iteration', '1', '--descartes', '--gregor'
                    ]))
                # Move the properties file into outputdir when done,
                # then clean up after the run.
                exec_get_output(
                    'mv -t ' + outputdir +
                    ' project.properties debug.log 2>/dev/null', True)
                exec_get_output('rm -rf NUL target/', True)
            else:
                logging.warning(root_name + " ignored due to no tests found")
    # Save the produced files in MongoDB, one collection per output dir.
    if mongo_connected:
        db = client['Dspot']
        dirs = exec_get_output(
            'find clonedrepo/dspot-out/ -maxdepth 1 -mindepth 1 -type d',
            True).rstrip().split('\n')
        if isinstance(dirs, str):
            dirs = [dirs]
        for dir in dirs:
            # Directory name becomes part of the collection name.
            dirname = dir.split('/')[-1]
            colname = reposlug.split('/')[1] + dirname + '-' + timecap
            logging.warning(dir)
            col = db[colname]
            # All files in dir, excluding compiled .class binaries.
            files = exec_get_output(
                'find ' + dir + ' -type f | grep -v .class',
                True).rstrip().split('\n')
            for file in files:
                # Was a bare open() with no close — use a context manager so
                # the handle is released even if read() fails.
                with open(file) as f:
                    text = f.read()  # entire contents, should be UTF-8 text
                file_name = file.split('/')[-1]
                logging.warning('File to Mongodb: ' + file_name)
                text_file_doc = {"file_name": file_name, "contents": text}
                col.insert(text_file_doc)
    else:
        # Was logging.waring (AttributeError) — this path crashed instead of
        # logging when MongoDB was unavailable.
        logging.warning("Nothing will be submit since mongodb is not connected")