def loadMilestone(self, filename, rID):
    """Load milestone records for ``rID`` from disk into ``self.record_milestone``.

    @param filename - base name of the milestone data file
    @param rID - record id whose milestone entries should be loaded
    """
    print('loadMilestone ' + filename)
    name = 'extensions/milestone/data/' + filename + '-milestone'
    record_milestone_back = {}
    if os.path.exists(name):
        # 'with' guarantees the handle is closed (the original leaked it).
        with open(name, 'rU') as f:
            for line in f:
                if line.startswith('#'):
                    continue
                record = Record(line)
                key = record.get_id().strip()
                if key != rID:
                    continue
                # .get/.setdefault replace the py2-only has_key()
                record_milestone_back.setdefault(key, []).append(record)
    if record_milestone_back.get(rID):
        # Store a real list, newest first. The original cached the bare
        # reversed() object -- a one-shot iterator that is exhausted after
        # the first traversal (bug).
        self.record_milestone[rID] = list(reversed(record_milestone_back[rID]))
def __init__(self, year, lines):
    """Build the record by delegating straight to the base class.

    @param year - the year to tag the job with
    @param lines - the strings making up this record
    """
    Record.__init__(self, year, lines)
def read(data):
    """Parse ``data`` into a list of extracted records.

    The first two lines must be the presenter and ordering headers; each
    remaining line is matched against the known layouts in priority order.
    Note: the final line popped when the buffer empties is never examined
    (mirrors the original control flow).
    """
    lines = data.splitlines()
    records = [
        Record.extract(lines.pop(0), PRESENTER_HEADER_RECORD),
        Record.extract(lines.pop(0), ORDERING_HEADER_RECORD),
    ]
    current_line = lines.pop(0)
    layouts = (
        REQUIRED_INDIVIDUAL_RECORD,
        OPTIONAL_RECORD,
        ORDERING_FOOTER_RECORD,
        PRESENTER_FOOTER_RECORD,
    )
    while lines:
        for layout in layouts:
            if Record.valid(current_line, layout):
                records.append(Record.extract(current_line, layout))
                break
        else:
            raise Exception('Invalid record: %s' % current_line)
        current_line = lines.pop(0)
    return records
def __init__(self, year, lines):
    """Parse an image-size update event.

    @param year - the year to tag the job with
    @param lines - the strings making up this record
    """
    Record.__init__(self, year, lines)
    ## size of the image for this job
    self.imageSize = int(self.extract(
        "Image size of job updated: (?P<imageSize>[\d]+)", lines[0], "imageSize"))
    ## memory used by this job in MB
    self.memoryUsageMb = 0
    ## resident size of this job in KB
    self.residentSetSizeKb = 0
    # Three-line records carry a memory-usage line before the RSS line;
    # two-line records only have the RSS line.
    if len(lines) == 3:
        self.memoryUsageMb = int(self.extract(
            "(?P<memoryUsage>[\d]+)", lines[1], "memoryUsage"))
        rss_line = lines[2]
    else:
        rss_line = lines[1]
    self.residentSetSizeKb = int(self.extract(
        "(?P<residentSetSize>[\d]+)", rss_line, "residentSetSize"))
def __init__(self, year, lines):
    """Parse a job error event: slot/reason plus transfer byte counts.

    @param year - the year to tag the job with
    @param lines - the strings making up this record
    """
    Record.__init__(self, year, lines)
    slot_pat = r"Error from (?P<slot>[\w]+@[\d]+@[\w\-.]+): (?P<reason>.+?)($)"
    ## the slot the job was in at the time of this exception
    self.slot = None
    ## the reason for the exception
    self.reason = None
    ## the number of bytes sent
    self.runBytesSent = None
    ## the number of bytes received
    self.runBytesReceived = None
    if re.search(slot_pat, lines[1]) is None:
        # No slot info: the whole line is the reason.
        self.reason = lines[1].strip()
        bytes_pat = r"(?P<bytes>[\d]+)"
    else:
        values = self.extractValues(slot_pat, lines[1])
        self.slot = values["slot"]
        self.reason = values["reason"].strip()
        # NOTE: trailing space kept -- this branch historically used a
        # slightly different pattern than the no-slot branch.
        bytes_pat = r"(?P<bytes>\d+) "
    self.runBytesSent = int(self.extract(bytes_pat, lines[2], "bytes"))
    self.runBytesReceived = int(self.extract(bytes_pat, lines[3], "bytes"))
def __init__(self, builder, is_revise_mode=False):
    """Set up a spelling-test session.

    is_revise_mode: whether this session is revision (review) mode
    """
    self.builder = builder
    self.is_revise_mode = is_revise_mode
    self.is_new_word = False       # is this a brand-new word?
    self.is_correct_mode = False   # are we in correction mode?
    self.is_first_time = True      # is this the first test run?
    self.passed = []               # words answered correctly
    self.failed = []               # words answered incorrectly
    self.typing_sound = True
    self.sound_read_word = True
    self.sound_check_answer = True
    self.__connect_gui()  # wire up the GUI signals
    # Build the word queue: the current word list, shuffled SPELLING_TIMES
    # times over and concatenated.
    record = Record()
    words_list = record.get_current_record()
    self.words_len = len(words_list)
    self.queue = []
    for _ in range(SPELLING_TIMES):
        shuffled = words_list[:]
        random.shuffle(shuffled)
        self.queue += shuffled
    self.queue_len = len(self.queue)
    self.__start_testing()
def add(self, number_entities, records_per_entity):
    """
    Adds additional entities to the database

    :param number_entities: number of new entities to create
    :param records_per_entity: number of records to create per new entity
    :raises Exception: when records and labels are out of sync
    """
    if len(self.labels) != len(self.database.records):
        raise Exception('Number of records and labels do not match')
    # Continue numbering after the current maxima. max() over the dict
    # keys/values replaces the original manual scan over Python-2-only
    # izip(...iteritems()) pairs; the seed 0 preserves the original floor.
    current_max_record_id = max([0] + list(self.database.records))
    current_max_entity_id = max([0] + list(self.labels.values()))
    record_index = current_max_record_id + 1
    first_new_entity = current_max_entity_id + 1
    for entity_index in range(first_new_entity, first_new_entity + number_entities):
        # Every record of this entity shares one feature vector (no corruption).
        features = np.random.rand(self.database.feature_descriptor.number).astype(str)
        for _ in range(records_per_entity):
            r = Record(record_index, self.database.feature_descriptor)
            r.initialize_from_annotation(features)
            self.database.records[record_index] = r
            self.labels[record_index] = entity_index
            record_index += 1
def main():
    """Entry point: translate the given data or display the history."""
    args = docopt(data_io.set_up_doc(__doc__), version='0.1')
    record = Record(debug=args['--debug'])
    if args['<data>']:
        # Translation request: split out languages and payload.
        from_lang, to_lang, data = _extract(args)
        translator = Translator(from_lang, to_lang, data, debug=args['--debug'])
        # 'result' is a dictionary containing the decoded translation info.
        result = translator.translate()
        translator.display_result(result)
        # Persist this translation in the history.
        record.add(from_lang, to_lang, data, result)
    elif args['--record']:
        # Show the saved translation history.
        record.display()
    else:
        raise Exception('No Implemented Yet.')
def load(self, ids, modified=False):
    """Append records for ``ids`` to the group and emit change signals.

    @param ids - iterable of record ids to load (falsy -> no-op)
    @param modified - when True, signal the first loaded record as
        modified/changed so listeners refresh
    @return True always
    """
    if not ids:
        return True
    # The original also tested `len(ids) >= 1`, which is always true after
    # the falsy guard above -- dropped as redundant.
    self.lock_signal = True  # suppress per-record signals during the rebuild
    new_records = []
    for record_id in ids:  # renamed from `id` (shadowed the builtin)
        record = self.get(record_id)
        if not record:
            record = Record(self.model_name, record_id, group=self)
            self.append(record)
            record.signal_connect(self, "record-changed", self._record_changed)
            record.signal_connect(self, "record-modified", self._record_modified)
        new_records.append(record)
    # Loading an id cancels any pending removal/deletion of that record.
    for record in self.record_removed[:]:
        if record.id in ids:
            self.record_removed.remove(record)
    for record in self.record_deleted[:]:
        if record.id in ids:
            self.record_deleted.remove(record)
    if self.lock_signal:
        self.lock_signal = False
        self.signal("group-cleared")
    if new_records and modified:
        new_records[0].signal("record-modified")
        new_records[0].signal("record-changed")
    self.current_idx = 0
    return True
def __init__(self, number_entities, records_per_entity, number_features=2):
    """
    Initializes synthetic database
    No initial corruption, so records from the same cluster have exact same features
    :param number_entities: number of entities (clusters) to generate
    :param records_per_entity: Number of records per entity
    :param number_features: number of float features per record
    """
    names = ['Name_{0}'.format(i) for i in range(number_features)]
    types = ['float'] * number_features
    strengths = ['weak'] * number_features
    blocking = [''] * number_features
    pairwise_uses = ['numerical_difference'] * number_features
    self.labels = dict()  # record identifier -> cluster label
    self.database = Database()
    descriptor = FeatureDescriptor(names, types, strengths, blocking, pairwise_uses)
    self.database.feature_descriptor = descriptor
    record_index = 0
    for entity_index in range(number_entities):
        # Every record of this entity shares the same feature vector.
        features = np.random.rand(descriptor.number).astype(str)
        for _ in range(records_per_entity):
            r = Record(record_index, descriptor)
            r.initialize_from_annotation(features)
            self.database.records[record_index] = r
            self.labels[record_index] = entity_index
            record_index += 1
def __init__(self, year, lines):
    """Parse a job-eviction event: usage times, byte counts, resources.

    @param year - the year to tag the job with
    @param lines - the strings making up this record
    """
    Record.__init__(self, year, lines)
    ## reason for eviction
    self.reason = lines[1].strip()
    pat = r"\((?P<term>\d+)\) Job was not checkpointed."
    ## termination code
    self.term = self.extract(pat, lines[1], "term")
    ## remote user / sys run usage time
    self.userRunRemoteUsage, self.sysRunRemoteUsage = self.extractUsrSysTimes(lines[2])
    ## local user / sys run usage time
    self.userRunLocalUsage, self.sysRunLocalUsage = self.extractUsrSysTimes(lines[3])
    pat = r"(?P<bytes>\d+) "
    ## bytes sent during the run
    self.runBytesSent = int(self.extract(pat, lines[4], "bytes"))
    ## bytes received during the run
    self.runBytesReceived = int(self.extract(pat, lines[5], "bytes"))
    # BUGFIX: the original pattern read r"...\s+\Request..." -- the stray
    # backslash forms the invalid escape \R, which modern `re` rejects
    # with re.error at compile time.
    pat = r"Partitionable Resources :\s+Usage\s+Request\s+Allocated$"
    ## disk usage / disk requested
    self.diskUsage = None
    self.diskRequest = None
    ## memory usage / memory requested
    self.memoryUsage = None
    self.memoryRequest = None
    if re.search(pat, lines[6]) is None:
        # Old two-column layout: Usage / Request only.
        self.diskUsage, self.diskRequest = self.extractUsageRequest(lines[8])
        self.memoryUsage, self.memoryRequest = self.extractUsageRequest(lines[9])
    else:
        # Three-column layout; the Allocated column is parsed but unused.
        self.diskUsage, self.diskRequest, _allocated = self.extractUsageRequestAllocated(lines[8])
        self.memoryUsage, self.memoryRequest, _allocated = self.extractUsageRequestAllocated(lines[9])
def test_leading_underscores(self):
    """Leading underscores are accepted by default and rejected when
    allow_leading_underscores=False."""
    # assertTrue replaces the deprecated assert_ alias (removed in
    # modern unittest).
    self.assertTrue(Record.define('Foo', ('_foo',)))
    self.assertTrue(Record.define('Foo', ('foo',), allow_leading_underscores=False))
    self.assertRaises(record.InvalidNameError, Record.define, 'Foo', ('_foo',),
                      allow_leading_underscores=False)
def next(self):
    """Return the next Record in the iteration, or raise StopIteration."""
    if self._index == self._total:
        raise StopIteration
    row = Record(self._simplemodel)
    row.setColumnProperties(self._data[self._index])
    self._index += 1
    return row
def __init__(self):
    """Audio-recorder window: build the widgets, lay them out, and wire
    the button signals.

    Inherits QWidget (UI), Record (audio capture) and threading.Thread
    (background recording); all three bases are initialised explicitly.
    """
    QWidget.__init__(self)
    Record.__init__(self)
    threading.Thread.__init__(self)
    # self.record = Record()
    # Buttons: start/stop recording, choose save directory, file suffix.
    self.btn_begin_record = QtGui.QPushButton(self)
    self.btn_stop_record = QtGui.QPushButton(self)
    self.btn_sel_save_dir = QtGui.QPushButton(self)
    self.btn_postfix = QtGui.QPushButton(self)
    self.layout = QtGui.QGridLayout(self)
    # Line edits / labels: save path, file name, start/stop times, duration.
    self.lin_show_dir = QtGui.QLineEdit(self)
    self.lab_save_file_dir = QtGui.QLabel(self)
    self.lab_save_name = QtGui.QLabel(self)
    self.lin_save_name = QtGui.QLineEdit(self)
    self.lab_begin_record_time = QtGui.QLabel(self)
    self.lin_begin_record_time = QtGui.QLineEdit(self)
    self.lab_stop_record_time = QtGui.QLabel(self)
    self.lin_stop_record_time = QtGui.QLineEdit(self)
    self.lin_record_time = QtGui.QLineEdit(self)
    self.lab_show_record_time = QtGui.QLabel(self)
    # Elapsed recording time counter.
    self.record_time = 0
    # Widget captions (Chinese UI text; runtime strings left untouched):
    # start/stop time labels, start/stop buttons, save path/name, duration.
    self.lab_begin_record_time.setText(u'开始录音时间:')
    self.lab_stop_record_time.setText(u'停止录音时间:')
    self.btn_begin_record.setText(u'开始录音')
    self.btn_stop_record.setText(u'停止录音')
    self.btn_sel_save_dir.setText(u'...')
    self.btn_postfix.setText(u'.wav')
    self.lab_save_file_dir.setText(u'录音保存路径:')
    self.lab_save_name.setText(u'录音保存名字:')
    self.lab_show_record_time.setText(u'录音时长:')
    # Default save directory is the current working directory.
    self.lin_show_dir.setText(os.getcwd())
    self.btn_begin_record.setFixedWidth(120)
    self.btn_stop_record.setFixedWidth(120)
    self.lab_save_file_dir.setFixedWidth(120)
    self.btn_sel_save_dir.setFixedWidth(20)
    # Grid layout: row 0 path, row 1 name, rows 2-3 start/stop time,
    # row 4 duration, row 5 the two action buttons.
    self.layout.addWidget(self.lab_save_file_dir, 0, 0, 1, 1)
    self.layout.addWidget(self.lin_show_dir, 0, 1, 1, 1)
    self.layout.addWidget(self.btn_sel_save_dir, 0, 2, 1, 1)
    self.layout.addWidget(self.lab_save_name, 1, 0, 1, 1)
    self.layout.addWidget(self.lin_save_name, 1, 1, 1, 1)
    self.layout.addWidget(self.btn_postfix, 1, 2, 1, 1)
    self.layout.addWidget(self.lab_begin_record_time, 2, 0, 1, 1)
    self.layout.addWidget(self.lin_begin_record_time, 2, 1, 1, 1)
    self.layout.addWidget(self.lab_stop_record_time, 3, 0, 1, 1)
    self.layout.addWidget(self.lin_stop_record_time, 3, 1, 1, 1)
    self.layout.addWidget(self.lab_show_record_time, 4, 0, 1, 1)
    self.layout.addWidget(self.lin_record_time, 4, 1, 1, 1)
    self.layout.addWidget(self.btn_begin_record, 5, 0, 1, 1)
    self.layout.addWidget(self.btn_stop_record, 5, 1, 1, 1)
    self.setLayout(self.layout)
    # Old-style PyQt signal wiring: directory chooser, start/stop handlers.
    QtCore.QObject.connect(self.btn_sel_save_dir, QtCore.SIGNAL(QtCore.QString.fromUtf8("clicked()")), self.onSelectDir)
    QtCore.QObject.connect(self.btn_begin_record, QtCore.SIGNAL(QtCore.QString.fromUtf8("clicked()")), self.onBeginRecord)
    QtCore.QObject.connect(self.btn_stop_record, QtCore.SIGNAL(QtCore.QString.fromUtf8("clicked()")), self.onStopRecord)
    QtCore.QMetaObject.connectSlotsByName(self)
def __init__(self, year, lines):
    """Capture the failure reason from the two detail lines.

    @param year - the year to tag the job with
    @param lines - the strings making up this record
    """
    Record.__init__(self, year, lines)
    ## the reason for the failure (both detail lines, ';'-joined)
    self.reason = ";".join([lines[1].strip(), lines[2].strip()])
def get(self, **opts):
    """Read backend data (empty dict when nothing found) into a fresh Record."""
    data = self._read(**opts) or {}
    record = Record(simplemodel=self)
    record.load(data)
    return record
def __init__(self, record=None):
    """Bluetooth OOB pairing record wrapper.

    Copies name/data from ``record`` when one is supplied; its type must
    match ours.
    """
    Record.__init__(self, 'application/vnd.bluetooth.ep.oob')
    self.device_address = '00:00:00:00:00:00'
    self.eir = dict()
    if record is None:
        return
    if record.type != self.type:
        raise ValueError("record type mismatch")
    self.name = record.name
    self.data = record.data
def compare_write_reports2(self, printer, sap):
    """Compare printer vs SAP tax reports and write a CSV reconciliation.

    :type printer: list of PrinterRecord
    :type sap: list of SapRecord
    :return: None; writes self.args["out"] and prints summary counts
    """
    tax_diff_by_tax = {}
    # 'with' closes the output file (the original leaked the handle).
    with open(self.args["out"], 'wt') as f:
        output = csv.writer(f)
        output.writerow(('id', 'status', 'message', 'comment', 'tax code diff',
                         'tax diff', 'taxes by tax', 'tax sum'))
        both, only_printer, only_sap = self.get_common_elements(printer, sap)
        # Records present only on the printer side.
        for refNum in only_printer:
            for tax in printer[refNum].tax_sum_by_tax:
                output.writerow((refNum, Record.STATUS_BAD, Record.MESSAGE_ONLY_PRINTER,
                                 None, tax, printer[refNum].tax_sum_by_tax[tax],
                                 printer[refNum].tax_sum_by_tax,
                                 printer[refNum].total_tax_sum))
        # Records present only in SAP.
        for refNum in only_sap:
            for tax in sap[refNum].tax_sum_by_tax:
                if tax == SapRecord.TAX_TECHNICAL_CODE and abs(sap[refNum].tax_sum_by_tax[tax]) < eps:
                    # skip when it contains the technical code with near-zero tax
                    continue
                output.writerow((refNum, Record.STATUS_BAD, Record.MESSAGE_ONLY_SAP,
                                 None, tax, abs(sap[refNum].tax_sum_by_tax[tax]),
                                 sap[refNum].tax_sum_by_tax,
                                 sap[refNum].total_tax_sum))
        # Records on both sides: emit every discrepancy message.
        for refNum in both:
            messages = Record.equal_records(printer[refNum], sap[refNum])
            for message in messages:
                tax_symbol = message["tax_symbol_err"]
                tax_diff = None
                if tax_symbol:
                    # Accumulate the per-tax-code difference (printer minus
                    # absolute SAP value, rounded by the shared helper).
                    tax_diff = Record.round(
                        printer[refNum].tax_sum_by_tax.get(tax_symbol, 0)
                        - abs(sap[refNum].tax_sum_by_tax.get(tax_symbol, 0)))
                    tax_diff_by_tax[tax_symbol] = tax_diff_by_tax.get(tax_symbol, 0) + tax_diff
                output.writerow((refNum, message["status"], message["message"],
                                 message["comment"], tax_symbol, tax_diff,
                                 (printer[refNum].tax_sum_by_tax, sap[refNum].tax_sum_by_tax),
                                 (printer[refNum].total_tax_sum, sap[refNum].total_tax_sum)))
            # (dead `b = 'blad?'` assignment in the empty-messages case removed)
        # Per-tax totals of all accumulated differences.
        for tax in tax_diff_by_tax:
            output.writerow((tax, Record.round(tax_diff_by_tax[tax])))
    print('\nliczba wspolnych - ' + str(len(both)))
    print('liczba na drukarce - ' + str(len(only_printer)))
    print('liczba w sap - ' + str(len(only_sap)))
def post(self, key=None):
    """Create or update the Record stored under ``key``; echo the key back."""
    if not key:
        key = self.request.get('key')
    record = None
    if key:
        record = Record.get_by_key_name(key)
        if record:
            # Existing record: overwrite its value.
            record.value = self.request.get('value')
        else:
            # No such record yet: create it keyed by name.
            record = Record(key_name=key, value=self.request.get('value'))
        record.put()
    self.response.out.write(key)
def __init__(self, annotation_path=None, header_path=None, max_records=np.Inf, precomputed_x2=None):
    """
    :param annotation_path: String, path to annotation file
    :param header_path: String, path to header info (if not included in the annotations file)
    :param max_records: Int, number of records to load from annotation file
    :param precomputed_x2: Precomputed weak features (smaller valued feature is better)
                           A dict[(id1, id2)] = 1D vector, where id2 >= id1
    """
    self.records = dict()
    if annotation_path:
        # 'with' guarantees the handles are closed even when parsing raises
        # (the original leaked them on the name-mismatch Exception).
        with open(annotation_path, 'r') as ins:
            if header_path:
                with open(header_path, 'r') as header_ins:
                    feature_names = next(header_ins).strip('\n').split(',')
                    # variable type (e.g. int, string, date)
                    feature_types = next(header_ins).strip('\n').split(',')
                    feature_strengths = next(header_ins).strip('\n').split(',')
                    blocking = next(header_ins).strip('\n').split(',')
                    pairwise_uses = next(header_ins).strip('\n').split(',')
                # First annotation line repeats the header; cross-check it.
                feature_names_check = next(ins).strip('\n').split(',')
                if feature_names != feature_names_check:
                    raise Exception('Header feature names and annotation feature names do not match')
            else:
                feature_names = next(ins).strip('\n').split(',')  # first line is a header
                # variable type (e.g. int, string, date)
                feature_types = next(ins).strip('\n').split(',')
                feature_strengths = next(ins).strip('\n').split(',')
                blocking = next(ins).strip('\n').split(',')
                pairwise_uses = next(ins).strip('\n').split(',')
            # Drop columns marked 'ignore' from every descriptor list.
            ignore_indices = find_in_list(feature_types, 'ignore')
            feature_names = remove_indices(ignore_indices, feature_names)
            feature_types = remove_indices(ignore_indices, feature_types)
            feature_strengths = remove_indices(ignore_indices, feature_strengths)
            blocking = remove_indices(ignore_indices, blocking)
            pairwise_uses = remove_indices(ignore_indices, pairwise_uses)
            self.feature_descriptor = FeatureDescriptor(feature_names, feature_types,
                                                        feature_strengths, blocking,
                                                        pairwise_uses)
            # Loop through all the records
            for line_index, sample in enumerate(ins):
                print('Extracting sample', line_index)
                r = Record(line_index, self.feature_descriptor)  # record object from utils
                features = remove_indices(ignore_indices, sample.rstrip('\n').split(','))
                try:
                    r.initialize_from_annotation(features)
                except Exception:
                    # Narrowed from a bare `except:`; keep the best-effort
                    # behavior of loading an empty record on parse failure.
                    print('Unable to parse:', sample)
                self.records[line_index] = r
                if line_index >= max_records - 1:
                    break
    else:
        self.feature_descriptor = None
    self._precomputed_x2 = precomputed_x2
def getStartupPorjects(self, path):
    """Read startup-project records from ``path`` and register their GitHub URLs.

    (Name kept as-is -- including the historical typo -- for caller
    compatibility.)

    @param path - path to the record file; missing file is a no-op
    """
    data = {}
    if os.path.exists(path):
        # 'with' closes the handle; the original leaked it.
        with open(path, 'rU') as f:
            for line in f:
                record = Record(line)
                # Normalise the title into a GitHub org/repo-style key.
                key = record.get_title().replace(' ', '').replace('.', '').strip()
                data[key.lower()] = 'https://github.com/' + key
    if data:
        self.getProjectByDict(data, 'eecs/projects/github/organization/')
def collectLogs(self, ids=None, filter=None):
    """
    Walk through all log records, collect those that passed
    the filter function matching. Return a generator which
    yields Record instances.
    """
    # Defaults: accept everything, over every known id.
    predicate = filter if filter else (lambda record: True)
    record_ids = ids if ids else Record.allIds()
    for record_id in record_ids:
        candidate = Record.load(record_id)
        if predicate(candidate):
            yield candidate
def __init__(self, year, lines):
    """Parse the executing host's internet address from the first line.

    @param year - the year to tag the job with
    @param lines - the strings making up this record
    """
    Record.__init__(self, year, lines)
    match = re.search(r"\<(?P<hostAddr>\S+)\>", lines[0])
    ## internet address of the host
    self.executingHostAddr = match.groupdict()["hostAddr"]
def test_equal_records(self):
    """Each fixture's printer/sap pair must compare to its expected messages."""
    # Dead locals `a = 'g'` and the unused `exp` tuple removed.
    for fixture in self.equals.values():
        result = Record.equal_records(fixture["printer"], fixture["sap"])
        self.assertEqual(result, fixture["expected"])
def read(data):
    """Parse ``data``: one presenter header followed by detail records.

    Note: the final line popped when the buffer empties is never examined
    (mirrors the original control flow).
    """
    lines = data.splitlines()
    records = [Record.extract(lines.pop(0), PRESENTER_HEADER_RECORD)]
    current_line = lines.pop(0)
    while lines:
        if not Record.valid(current_line, DETAIL_RECORD):
            raise Exception('Invalid record: %s' % current_line)
        records.append(Record.extract(current_line, DETAIL_RECORD))
        current_line = lines.pop(0)
    return records
def __init__(self, record=None):
    """Wi-Fi Simple Config (WSC) record wrapper.

    With no source record, seed a single all-zero credential placeholder;
    otherwise copy name/data from ``record`` (types must match).
    """
    Record.__init__(self, 'application/vnd.wfa.wsc')
    self._version = '\x20'
    self._passwords = list()
    self._other = list()
    if not record:
        # Default: one empty credential entry.
        self._passwords.append({
            'public-key-hash': 20 * '\x00',
            'password-id': 0,
            'password': '',
        })
    else:
        if record.type != self.type:
            raise ValueError("record type mismatch")
        self.name = record.name
        self.data = record.data
def setUp(self):
    """Create two Records sharing one fully-populated feature row."""
    # One complete annotation row, split into its per-column values.
    self._features_full = (
        '9552601,neworleans,2014-01-30 02:41:11,Louisiana,New Orleans,8,'
        '0,haley,22,60/80/100,66,110.5,DD,30,26,29,caucasian,white,blue,brunette,'
        'rest_type,rest_ethnicity,res_age,9802534087;5182561877,NC;NY,charlotte;'
        'albany,[email protected],www.johnsmith.com,johnsmithmedia,'
        'Louisiana_2014_1_30_1391067671000_6_0.jpg;Louisiana_2014_1_30_1391067671000_6_1.jpg;'
        'Louisiana_2014_1_30_1391067671000_6_2.jpg;Louisiana_2014_1_30_1391067671000_6_3.jpg;'
        'Louisiana_2014_1_30_1391067671000_6_4.jpg;Louisiana_2014_1_30_1391067671000_6_5.jpg'
    ).split(',')
    feature_types = ('int,string,date,string,string,int,int,string,int,string,int,float,string,int,int,int,'
                     'string,string,string,string,,,,int,string,string,string,string,string,'
                     'string').split(',')
    descriptor = FeatureDescriptor('', feature_types, '', '', '')
    descriptor.number = len(feature_types)
    self._r0 = Record(0, descriptor)
    self._r1 = Record(0, descriptor)
def preActionOfEdit(self, elements):
    """Convert data before editing"""
    # Render the stored time back to its editable form.
    time_conv = Record.getConv('time', toRecord=False)
    elements['time'] = time_conv(elements['time'])
    elements['comment'] = elements['comment'].encode()
    return elements
def add(self, interactive=False, **fields):
    """
    Add a log record to the system

    When interactive is True, ask data for subject, time, scene,
    people, tag, and log data from the user interactively; the
    provided arguments are used as the default values for the
    user's choices.
    """
    if interactive:
        fields = self.collectLogInfo(**fields)
    # Stamp the configured author onto the record.
    fields['author'] = '%s <%s>' % (self.config['authorName'],
                                    self.config['authorEmail'])
    assert self.checkRequirement(**fields), "field data not sufficient"
    record = Record(**Record.engine.convertFields(fields.items()))
    record.save()
def load_record(self, name):
    """Load the named record and apply it; log and bail out when missing."""
    try:
        record = Record.load(name)
    except RecordNotExistError:
        utils.debug('Record "{}" not exist'.format(name))
        return
    else:
        utils.debug('Record "{}" loaded'.format(name))
        self.configure(record)
def save_single_img(self, file_name=None, path="./", format=".png"):
    """
    Saves the image to the specified location.

    Args:
        file_name: defaults to a timestamped name
            ("DepthPerspective <now>") computed at call time.
            BUGFIX: the original default expression evaluated
            str(datetime.datetime.now()) in the `def` line, so the
            timestamp was frozen at import time and every default-named
            image overwrote the previous one.
        path: directory to save into (current directory by default).
        format: picture format suffix (".pfm", ".png", etc.).
    """
    if file_name is None:
        file_name = "DepthPerspective " + str(datetime.datetime.now())
    # Save an image on path with your format
    Record.save_single_img(self, file_name=file_name, path=path, format=format)
    return
def read_excel_record(excel_path):
    """Read column-1 values from Sheet1 into Records, stopping at the
    first empty cell."""
    sheet = openpyxl.load_workbook(excel_path)["Sheet1"]
    records = []
    row = 1
    while True:
        value = sheet.cell(column=1, row=row).value
        if value is None:
            break  # first blank cell terminates the sheet scan
        records.append(Record(value))
        row += 1
    return records
def get_record(self, id=None, **dico):
    """Return the first Record whose info matches the given criteria,
    or None when nothing matches.

    NOTE(review): the inner loop returns on the FIRST matching key, i.e. a
    record is accepted if ANY criterion matches, not all of them -- confirm
    this is intended before relying on multi-criteria filtering.
    """
    if id:
        dico['id'] = id
    # Record names are stored lower-case; normalise the query.
    if 'name' in dico:
        dico['name'] = dico['name'].lower()
    records = self.list_records_info()
    for record in records:
        for k in dico:
            if k in record and record[k] == dico[k]:
                return Record(self, **record)
    #TODO: raise Exception("Not found")
def update_record(
    self,
    index: int,
    dt: Optional[datetime],
    tag: str,
    is_active: Optional[bool],
    note: str,
    update: bool = False,
):
    """Replace the record at ``index`` with one rebuilt from the arguments.

    The tag name is resolved to its id and the note is validated first.
    """
    resolved_tag_id = self._tag_to_tag_id(tag)
    self.word_set.check_note(note, update)
    self.record_list[index] = Record(dt, resolved_tag_id, is_active, note)
def newProg(self):
    """Reset per-program parser state and parse the next program.

    Returns False when the overall parse has already completed.
    """
    # self.printFinalProg()
    if self.completedParse:
        return False
    self.programCount += 1
    self.tokenVal = []
    self.tokenName = []
    self.line = []
    # Sentinel record so the parser never dereferences a missing "last".
    self.lastRec = Record("bullshit", "asdf", "DONT USE")
    self.writeString = []
    self.program()
def test_2children(self):
    """A parent with two children tracks state, step count and back-links."""
    rec_parent = Record(state='state', evaluation=(1, 2, 3), parent=None)
    child_states = ['state child1', 'state child2']
    rec_parent.opToChild[('x', 0, True)] = Record(
        state=child_states[0], evaluation=(2, 3, 4), parent=rec_parent)
    rec_parent.opToChild[('x', 0, False)] = Record(
        state=child_states[1], evaluation=(3, 4, 5), parent=rec_parent)
    self.assertEqual(len(rec_parent.opToChild), 2)
    self.assertEqual(rec_parent.opToChild['x', 0, True].state, child_states[0])
    self.assertEqual(rec_parent.opToChild['x', 0, True].nbSteps, 1)
    self.assertEqual(rec_parent.opToChild['x', 0, False].nbSteps, 1)
    self.assertEqual(rec_parent.opToChild['x', 0, False].parent, rec_parent)
def excute(self, form_dict):
    """Resolve a record by id/title and return the (remote, local) URL pair.

    (Method name kept as-is -- historical typo -- for caller compatibility.)

    @param form_dict - request fields: rID, rTitle, url, originFileName
    @return result of self.getUrl(remote_url, local_url)
    """
    rID = form_dict['rID'].strip()
    title = form_dict['rTitle'].replace('%20', ' ').strip()
    url = form_dict['url'].strip()
    fileName = form_dict['originFileName']
    print(fileName)
    if rID.startswith('loop-h'):
        # History records are matched by title against the per-file history db.
        historyPath = (os.getcwd() + '/extensions/history/data/'
                       + fileName[fileName.rfind('/') + 1:] + '-history')
        print(historyPath)
        r = self.utils.getRecord(title, path=historyPath, matchType=2,
                                 use_cache=False, accurate=False)
    else:
        r = self.utils.getRecord(rID, path=fileName)
    if r is not None and r.get_id().strip() != '':
        if rID.startswith('loop-h'):
            title = title.replace('%20', ' ')
            desc = r.get_describe() + ' ' + self.kg.getCrossref(
                title, ' '.join(Config.exclusive_crossref_path))
            record = Record('custom-exclusive-' + rID + ' | ' + title + ' | ' + url + ' | ' + desc)
            localUrl = self.utils.output2Disk([record], 'exclusive', 'exclusive',
                                              append=Config.exclusive_append_mode)
        else:
            db = fileName[fileName.find('db/') + 3:fileName.rfind('/')] + '/'
            key = fileName[fileName.rfind('/') + 1:]
            print(db + ' ' + key)
            localUrl = ('http://' + Config.ip_adress + '/?db=' + db + '&key=' + key
                        + '&filter=' + rID + '&column=1&enginType='
                        + Config.recommend_engin_type)
            localUrl = localUrl + '&crossrefQuery=""'
        return self.getUrl(r.get_url(), localUrl)
    else:
        # No matching record: synthesise an exclusive record from the form.
        title = title.replace('%20', ' ')
        desc = 'engintype:' + title + ' '
        desc += 'localdb:' + title + ' '
        desc += self.kg.getCrossref(title, ' '.join(Config.exclusive_crossref_path))
        record = Record('custom-exclusive-' + rID + ' | ' + title + ' | ' + url + ' | ' + desc)
        localUrl = self.utils.output2Disk([record], 'exclusive', 'exclusive',
                                          append=Config.exclusive_append_mode)
        localUrl = localUrl + '&crossrefQuery=""'
        return self.getUrl(url, localUrl)
def clean_task(self):
    """
    check task hash for unfinished long running tasks, requeue them.

    Requeue safety:
    `self.timeout` must longer than crawler(worker) job timeout,
    or else `clean_task` add item back to queue, at the same time,
    job finished and removed from `self.timehash`.
    """
    # No timeout configured: timing info is meaningless, drop the hash.
    if self.timeout is None:
        self.conn.delete(self.timehash)
        return
    items = []
    time_now = time.time()
    # Scan every in-flight task's start time (Python-2 iteritems).
    for field, value in self.conn.hgetall(self.timehash).iteritems():
        if field == 'background_cleaning':
            # Bookkeeping flag, not a task entry.
            continue
        start_time = float(value)
        if time_now - start_time > self.timeout:
            failed_times = self.get_failed_times(field)
            if failed_times == 0:
                # means task_done already for this field
                continue
            failed_times += 1
            if failed_times > self.failure_times:
                # Permanently failed: count it and drop the task.
                Record.instance().increase_failed(self.key)
                self.conn.hdel(self.timehash, field)
            else:
                # Still retryable: schedule it for requeueing.
                items.append(field)
    # Requeue in chunks of self.batch_size to bound each pipeline.
    items, items_tail = items[:self.batch_size], items[self.batch_size:]
    while items:
        print('requeuing {} items(e.g. ... {}) to {}'.format(
            len(items), items[-10:], self.key))
        # Atomically remove from the time hash and push back onto the queue.
        pipeline = self.conn.pipeline()
        pipeline.hdel(self.timehash, *items)
        pipeline.sadd(self.key, *items)
        pipeline.execute()
        items, items_tail = items_tail[:self.batch_size], items_tail[
            self.batch_size:]
def saveIncrementalPapers(self, fileNumber, lines):
    """Rewrite the arxiv db file from ``lines``, upgrading only on change.

    @param fileNumber - suffix selecting which arxiv db file to update
    @param lines - raw record lines to write
    """
    file_name = self.get_file_name("eecs/papers/arxiv/arxiv" + fileNumber, self.school)
    file_lines = self.countFileLineNum(file_name)
    f = self.open_db(file_name + ".tmp")
    self.count = 0
    for line in lines:
        self.count += 1
        record = Record(line)
        # arxiv ids use '.' internally; the db key uses '-'.
        rawid = self.parse_arxiv_url(record.get_url().strip())[0].replace('.', '-')
        self.write_db(f, 'arxiv-' + rawid, record.get_title().strip(),
                      record.get_url().strip(), record.get_describe().strip())
    self.close_db(f)
    # Promote the tmp file only when the line count actually changed.
    if file_lines != self.count and self.count > 0:
        self.do_upgrade_db(file_name)
        print("before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n")
    else:
        self.cancel_upgrade(file_name)
        print("no need upgrade\n")
def __init__(self):
    """Provide the necessary lists containing message information."""
    Record.__init__(self)
    # Fields which are required by the message format.
    self._mandatory_fields = ["VMUUID", "SiteName", "MachineName"]
    # This list allows us to specify the order of lines when we construct
    # outgoing records.
    self._msg_fields = [
        "VMUUID", "SiteName", "CloudComputeService", "MachineName",
        "LocalUserId", "LocalGroupId", "GlobalUserName", "FQAN", "Status",
        "StartTime", "EndTime", "SuspendDuration", "WallDuration",
        "CpuDuration", "CpuCount", "NetworkType", "NetworkInbound",
        "NetworkOutbound", "PublicIPCount", "Memory", "Disk",
        "BenchmarkType", "Benchmark", "StorageRecordId", "ImageId",
        "CloudType", "IPv4Count", "IPv6Count", "SuspendTime",
        "StorageUsage", "CpuChange",
    ]
    # Database columns: the VO triple is spliced in after the ninth
    # message field.
    self._db_fields = (self._msg_fields[:9]
                       + ["VO", "VOGroup", "VORole"]
                       + self._msg_fields[9:])
    self._all_fields = self._db_fields
    # Fields accepted in messages but never stored.
    self._ignored_fields = [
        "UpdateTime", "StorageUsage", "CpuChange", "IPv4Count", "IPv6Count",
    ]
    # Fields which will have an integer stored in them.
    self._int_fields = [
        "SuspendDuration", "WallDuration", "CpuDuration", "CpuCount",
        "NetworkInbound", "NetworkOutbound", "PublicIPCount", "Memory",
        "Disk", "StorageUsage", "StartTime", "EndTime", "IPv4Count",
        "IPv6Count", "CpuChange", "SuspendTime",
    ]
    self._float_fields = ["Benchmark"]
    self._datetime_fields = []
    CloudRecord.all_records.append(self)
def editRecord(self, rID, data, originFileName):
    """Replace the line for ``rID`` in ``originFileName`` with edited data.

    @param rID - id of the record being edited
    @param data - raw edited record text ("title:... url:..." style)
    @param originFileName - path of the db file containing the record
    @return 'refresh' on success, 'error' when the data carries no id tag
    """
    print(data)
    record = Record(' | | | ' + data)
    newid = self.utils.reflection_call('record', 'WrapRecord', 'get_tag_content',
                                       record.line, {'tag': 'id'})
    if newid is None:
        return 'error'
    newid = newid.strip()
    title = self.utils.reflection_call('record', 'WrapRecord', 'get_tag_content',
                                       record.line, {'tag': 'title'}).strip()
    url = self.utils.reflection_call('record', 'WrapRecord', 'get_tag_content',
                                     record.line, {'tag': 'url'}).strip()
    desc = data.replace('title:' + title, '').replace('url:' + url, '').strip()
    print(rID)
    print(title)
    print(url)
    print(desc)
    if rID.startswith('custom-'):
        # Custom records carry their own id inside the description.
        desc = desc.replace('id:' + newid, '').strip()
        newline = newid + ' | ' + title + ' | ' + url + ' | ' + desc + '\n'
    else:
        newline = rID + ' | ' + title + ' | ' + url + ' | ' + desc + '\n'
    print('newline:')
    print(newline)
    if os.path.exists(originFileName):
        # 'with' blocks close both handles (the original leaked the reader).
        with open(originFileName, 'rU') as f:
            all_lines = []
            for line in f:
                if rID != line[0:line.find('|')].strip():
                    all_lines.append(line)
                else:
                    print('old line:')
                    print(line)
                    all_lines.append(newline)
        with open(originFileName, 'w') as f:
            f.writelines(all_lines)
    return 'refresh'
def load(id, path=None):
    """
    Load the content of the record from disk, parse it,
    and return a record instance; return None when the file
    is missing or cannot be parsed.
    """
    if not path:
        path = XmlStorage.idToPath(id)
    try:
        # 'with' closes the handle (the original leaked it).
        with open(path) as fp:
            code = fp.read()
        doc = XmlStorage.sourceToDom(code)
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); keep the best-effort None result.
        return None
    # collect all fields' data
    fields = {}
    for node in doc.firstChild.childNodes:
        if node.nodeType == node.ELEMENT_NODE:
            name = node.localName
            textNode = node.firstChild
            fields[name] = textNode.data if textNode else ''
    fields = Record.convertFields(fields.items())
    return Record(**fields)
def generate_list_object(self, document_path):
    """Parse "[x, y, ..., class]" lines from ``document_path`` into Records.

    @param document_path - path to the bracketed, comma-separated data file
    @return list of Record(x, y, class_number), one per line
    """
    objects = []
    # 'with' closes the file deterministically (the original relied on GC).
    with open(document_path) as doc:
        for line in doc:
            # Strip the newline and the surrounding brackets.
            line = line.strip('\n').strip('[').strip(']')
            parts = line.split(', ')
            # First two fields are the coordinates; the last is the class label
            # (negative index replaces parts[len(parts) - 1]).
            objects.append(Record(parts[0], parts[1], parts[-1]))
    return objects
def convert(self, record):
    """Turn a colour-attribute record into a negative yes/no variant.

    Builds a new record asking "Is the <object> <colour>?" with the
    answer fixed to 'no'. Returns None when the grounded formula has
    no object argument.
    """
    obj = getSecondArgumentOfPredicate(record.groundedFormula, '_obj')
    if obj is None:
        return None

    colour = record.answer
    converted = Record.fromOther(record)
    converted.question = 'Is the {} {}?'.format(obj, colour)
    converted.questionType = 'yes/no'
    converted.answer = 'no'
    converted.formula = '_predadj(A, B)'
    converted.groundedFormula = '_predadj({}, {})'.format(obj, colour)
    return converted
def get_unconfirmed_records(self, address):
    """Fetch unconfirmed records for *address* from the first
    reachable full node.

    ur.endorser -> sign it, ur.endorsee -> show in pending

    @param address - address whose unconfirmed records are wanted
    @return list of Record objects from the first node that answers,
            or None when no node could be reached
    """
    self.request_nodes_from_all()
    for node in self.full_nodes:
        url = self.URECORD_URL.format(node, self.FULL_NODE_PORT, address)
        try:
            response = requests.get(url)
            urecords = []
            for record in json.loads(response.content.decode('utf-8'))['records']:
                urecords.append(Record.from_json(json.loads(record)))
            # First successful node wins.
            return urecords
        except requests.exceptions.RequestException:
            # Best-effort: try the next node. (The original bound the
            # exception to ``re``, shadowing the stdlib ``re`` module.)
            pass
def __init__(self):
    """Initialise the field lists describing a public-IP usage record."""
    Record.__init__(self)

    # Fields that must be present in every message.
    self._mandatory_fields = [
        "MeasurementTime", "SiteName", "CloudType", "LocalUser",
        "LocalGroup", "GlobalUserName", "FQAN", "IPVersion", "IPCount"
    ]

    # Full set of fields a message may carry.
    self._msg_fields = [
        "MeasurementTime", "SiteName", "CloudType", "LocalUser",
        "LocalGroup", "GlobalUserName", "FQAN", "IPVersion", "IPCount",
        "CloudComputeService"
    ]

    self._all_fields = self._msg_fields
    self._db_fields = self._msg_fields

    # Fields parsed as integers. BUGFIX: the original listed "IpCount",
    # which matches no declared field (they all spell it "IPCount"), so
    # the count was never converted to int.
    self._int_fields = [
        "IPCount",
    ]

    self._unix_timestamp_fields = [
        "MeasurementTime",
    ]

    PublicIpUsageRecord.all_records.append(self)
def create_record(self, request):
    """Create a Record from a form POST and broadcast it.

    The request body is expected to look like
    ``endorser=<address>&detail=<text>``; the record is created on
    behalf of the user owning the current session and returned as a
    JSON-encoded confirmation message.
    """
    session_id = request.getSession().uid.decode('utf-8')
    for instance in self.instances:
        if instance.session_id == session_id:
            key = instance.get_key_by_session(session_id)
            content = request.content.read().decode('utf-8')
            # Manual form parsing: "endorser=...&detail=...".
            record_data = content.split('&')
            endorser = record_data[0].split('=')[1]
            detail = record_data[1].split('=')[1]
            # %3A is the url-encoded ':' inside the endorser address.
            record = Record(key.get_public_key(), endorser.replace('%3A', ':'), detail)
            self.broadcast_record(record)
            message = "Record created. <a href='/user'>Go Back</a>"
            return json.dumps(message)
def get_age_person(record: "rec.Record") -> "int | None":
    """Return the person's age taken from the given record.

    (Docstring translated from Russian.)

    :param record: the record to query
    :return: the age (int) when record.get_age() returns a value,
             otherwise None
    """
    # get_age() already yields either an int or None, so the original
    # explicit None round-trip was redundant; forward the value as-is.
    # Annotations are quoted so they are not evaluated at definition
    # time (the original ``int or None`` evaluated to plain ``int``).
    return record.get_age()
def parse_record(count, data, section, offset):
    """Convert wire-format records into class objects.

    (Docstring translated from Russian: "Conversion of a record into a
    class object".)

    @param count - number of records to parse
    @param data - full message as a hex string
    @param section - hex string of the record section being parsed
    @param offset - character offset into ``data`` of the record name
    @return list of (Record, name) tuples
    """
    result = []
    for i in range(count):
        # Four hex chars at ``offset`` encode the name reference that
        # get_part_name resolves into a readable name.
        chunks = int(data[offset: offset + 4], 16)
        name, _ = get_part_name("", chunks, data)
        # Fixed hex-character slices of the record: chars 12-20 -> ttl,
        # 4-8 -> type, 20-24 -> data length in bytes (hence *2 chars).
        # NOTE(review): ``offset`` is never advanced inside the loop,
        # so every iteration parses the same slices — confirm against
        # the callers whether that is intended.
        ttl = section[12:20]
        msg_type = section[4:8]
        record_data = section[24:24 + int(section[20:24], 16) * 2]
        result.append((Record(record_data, msg_type, ttl), name))
    return result
def replay(self):
    """Replay the game record selected in the UI, one move per period.

    Loads the record file, restores the initial board, then schedules
    ``play_next_step`` on a timer; each step checks that the recorded
    board matches the current UI board before applying the move.
    """
    recordpath = self.record_path.get()
    if not recordpath:
        # Message kept verbatim (Chinese): asks the user to pick a
        # record file via the "open" button first.
        self.show_message(message='请点击"open"按扭选择棋谱位置')
        return
    self.stop()
    self.event.set()
    record = Record()
    record.read(recordpath)
    # The first entry holds the initial board layout.
    init_bd = record[0][0]
    self.init_stone(init_bd)
    record_iter = iter(record)
    length = len(record)
    record.n = 1

    def play_next_step():
        # Blocks while self.event is cleared (pause support).
        self.event.wait()
        try:
            board, from_, action, reward = next(record_iter)
            player = board[from_]
            to_ = tuple(np.add(from_, rule.actions_move[action]))
            # Sanity check: recorded board must match the UI board.
            assert (board == self.board()).all(), str(board) + '\n' + str(
                self.board())
            result = self.move_to(self.stone(from_), to_)
            if result == rule.WIN:
                winner_text = self.winner_black if player == 1 else self.winner_white
                self.canvas.itemconfigure(winner_text, state=tk.NORMAL)
            self.show_message(
                str(length) + ':' + str(record.n) + ',reward: ' + str(reward))
            record.n += 1
            # Schedule the next move after self.period seconds.
            self.replay_timer = threading.Timer(self.period, play_next_step)
            self.replay_timer.start()
        except StopIteration:
            # End of the record: stop rescheduling.
            return

    self.replay_timer = threading.Timer(self.period, play_next_step)
    self.replay_timer.start()
def data(self, string):
    """Parse the binary payload of a handover record.

    Reads the one-byte version, then — for version 1.2 and later — a
    mandatory collision-resolution ('cr') record carrying the nonce,
    followed by any number of alternative-carrier ('ac') records.
    Unknown local records are skipped with a warning.

    Raises DecodeError for an unsupported major version and
    FormatError when the required cr record is missing.
    """
    log.debug("parse '{0}' record".format(self.type))
    if len(string) > 0:
        f = io.BytesIO(string)
        # First octet carries the version.
        self.version = Version(f.read(1))
        if self.version.major != 1:
            raise DecodeError("unsupported major version")
        if self.version >= Version('\x12'):
            # Version >= 1.2 requires a collision resolution record.
            record = Record(data=f)
            if record.type == "urn:nfc:wkt:cr":
                # cr payload: 16-bit big-endian random nonce.
                self.nonce = struct.unpack(">H", record.data)[0]
            else:
                s = "cr record is required for version {v.major}.{v.minor}"
                raise FormatError(s.format(v=self.version))
        # Remaining records: collect alternative carriers.
        while f.tell() < len(string):
            record = Record(data=f)
            if record.type == 'urn:nfc:wkt:ac':
                carrier = AlternativeCarrier(record.data)
                self.carriers.append(carrier)
            else:
                s = "skip unknown local record {0}"
                log.warning(s.format(record.type))
def add(self, name: str, note: str, link: str, price: float) -> int:
    """Persist a new Record and append it to the model.

    Commits the record to the database session first (so it obtains an
    id), notifies the view of the row insertion, and regenerates the
    role names when this is the very first row.

    :return: the database id of the newly created record
    """
    was_empty = self.rowCount() == 0

    entry = Record(name, note, link, price)
    self._session.add(entry)
    self._session.commit()

    self.beginInsertRows(QModelIndex(), self.rowCount(), self.rowCount())
    self._items.append(entry)
    if was_empty:
        # Role names depend on the record fields; only needed once.
        self._generate_role_names()
    self.endInsertRows()

    return entry.id
def _check_fields(self):
    '''
    Add extra checks to those made in every record.
    '''
    # Run the generic record-level checks first.
    Record._check_fields(self)

    # Break the user FQAN down into role, group and VO; the FQAN
    # itself stays in the record as other methods in the class use it.
    role, group, vo = parse_fqan(self._record_content['FQAN'])

    # We can't / don't put NULL in the database, so store the literal
    # string 'None' for any missing component.
    self._record_content['VORole'] = 'None' if role is None else role
    self._record_content['VOGroup'] = 'None' if group is None else group
    self._record_content['VO'] = 'None' if vo is None else vo

    if self._record_content['Benchmark'] is None:
        # The parent Record class's type checking sets an absent
        # Benchmark to None, but the column is NOT NULL in the
        # database, so fall back to a meaningful float 0.0.
        self._record_content['Benchmark'] = 0.0

    if self._record_content['CpuCount'] is None:
        # Treat a missing CpuCount as zero Cpus. A NULL here would be
        # accepted by the CloudRecords table but not by the
        # CloudSummaries table, breaking things at summariser time.
        self._record_content['CpuCount'] = 0
def insert(self, code, desc):
    """Insert a (code, description) pair into the table.

    Lazily creates the B-tree index on first use. Duplicate codes are
    rejected with a message and ``None`` is returned.
    """
    if self.btree is None:
        # First insert ever: allocate a datablock and build the index.
        new_config = self.buffer.new_datablock(
            4, self.buffer.datafile.NUM_DATABLOCKS - 1)
        self.btree = BTree(root=self.BTREE_ROOT_DEFAULT, buffer=self.buffer)
        self.btree.init()

    new_record = Record(code=code, description=desc)

    # Reject duplicate keys.
    if self.btree.has_key(code):
        print('Record with code %s already exists' % code)
        return None

    # Find a datablock with room for the record plus 4 bytes of
    # overhead, write it there, then index the row by its rowid.
    dblock, position = self.buffer.search_dblock_with_free_space(
        new_record.size() + 4, 1)
    new_record = dblock.write_data(new_record, position)
    self.btree.insert(new_record.code, new_record.rowid)
    print('Record Inserted')
def pull_log(self):
    """Read the device log and convert it into Record objects.

    The first log line is a header giving the time offset of the most
    recent sample and the interval between samples; every following
    line is one raw value. Timestamps are assigned backwards from
    (rounded now - offset), stepping *delta* per record.

    @return list of Record objects, newest first
    """
    reader = self._get_log_line()
    now = self._get_rounded_time()
    # ``next(reader)`` instead of the Python-2-only ``reader.next()``:
    # works with iterators on both Python 2 and 3.
    offset, delta = self._parse_header(next(reader))
    now -= offset

    records = []
    for record_raw in reader:
        records.append(Record(int(record_raw), now))
        now -= delta
    return records
def test_tag_id_and_note_filter(self):
    """Filtering must honour tag id and note fragment together."""
    dt = datetime(year=2021, month=1, day=27, hour=20, minute=53)
    self._record_list.append(Record(dt, note='note A'))
    self._record_list.append(Record(dt, note='note B'))
    self._record_list.append(Record(dt, tag_id=1, note='note A'))
    self._record_list.append(Record(dt, tag_id=1, note='note B'))

    # (tag_id, fragment) -> (expected indices, expected rendering)
    cases = [
        (0, 'A', [0], ['[27.01.2021 20:53:00] <no tag> note A']),
        (0, 'B', [1], ['[27.01.2021 20:53:00] <no tag> note B']),
        (1, 'A', [2], ['[27.01.2021 20:53:00] <test tag> note A']),
        (1, 'B', [3], ['[27.01.2021 20:53:00] <test tag> note B']),
    ]
    for tag_id, fragment, indices, rendered in cases:
        self.assertEqual(
            (indices, rendered),
            self._record_list.filter_record(tag_id=tag_id, fragment=fragment),
        )
def handle_payment(self, request):
    """Handle a completed payment callback for the session's user.

    Creates a "CoinPurchase" record endorsed by the genesis user in
    favour of the session owner and broadcasts it; the amount is
    credited once the record is mined.
    """
    session_id = request.getSession().uid.decode('utf-8')
    for instance in self.instances:
        if instance.session_id == session_id:
            key = instance.get_key_by_session(session_id)
            # Body is form-encoded; the second '&' field carries the
            # payment id after its '='.
            razorpay_payment_id = request.content.read().decode('utf-8').split('&')[1].split('=')[1]
            #TODO: capture payment
            endorser = self.get_genesis_user_address()
            endorsee = key.get_public_key()
            detail = "CoinPurchase"
            record = Record(endorsee, endorser, detail)
            self.broadcast_record(record)
            message = "Once transaction is mined, amount will be credited to you account. <a href='/user'>Go Back</a>"
            return json.dumps(message)
def processData(self, data):
    """Filter newline-separated record lines and collect twitter handles.

    Lines must contain ``self.convert_contain`` (when it is set) and
    must not contain ``self.convert_filter`` (when it is set). For every
    kept line whose url mentions 'twitter', the last path segment is
    accumulated into ``info`` and printed at the end. Returns the kept
    lines re-joined with newlines.
    """
    result = ''
    info = ''
    for line in data.split('\n'):
        r = Record(line)
        url = r.get_url().strip()
        # Keep only lines containing the required substring...
        if self.convert_contain != '' and line.find(
                self.convert_contain) == -1:
            continue
        # ...and drop lines containing the excluded substring.
        if self.convert_filter != '' and line.find(
                self.convert_filter) != -1:
            continue
        if url.find('twitter') != -1:
            # The last path component of a twitter url is the handle.
            info += url[url.rfind('/') + 1:] + ', '
        result += line + '\n'
    # Print the collected handles without the trailing ', '.
    print info[0:len(info) - 2]
    return result
def getData():
    """Open the input file and build a list of Record objects.

    Blank lines are skipped; a missing file or an invalid line aborts
    the program with a message.

    :return: list of Record objects, one per non-blank input line
    :raises SystemExit: when the file is missing or a line is invalid
    """
    try:
        # ``with`` closes the file even if Record() raises ValueError.
        with open(FILENAME, "r") as infile:
            # BUGFIX: the original tested ``line != ""``, which is
            # always true for lines read from a file (they keep their
            # newline); strip() makes the blank-line check effective.
            obList = [Record(line) for line in infile if line.strip() != ""]
        return obList
    except IOError:  # FileNotFoundError
        print(FILENAME, "not found")
        raise SystemExit
    except ValueError as e:
        print(e)
        raise SystemExit
def handleKnowledgeGraph():
    """Flask handler: enrich a single-record file with knowledge-graph data.

    Reads the file named in the form; if it does not already contain a
    'keyword:' tag, builds a knowledge-graph string from each record's
    '#'-separated description and writes the enriched record back to
    disk. Returns the new file url, or '' on any failure.
    """
    url = request.form['url'].strip()
    fileName = request.form['fileName'].strip()
    if os.path.exists(fileName):
        f = open(fileName, 'rU')
        lines = f.readlines()
        f.close()
        # Skip files that were already enriched.
        if ''.join(lines).find('keyword:') == -1:
            new_lines_record = []
            for line in lines:
                if line.strip() != '':
                    record = Record(line)
                    description = utils.reflection_call(
                        'record', 'WrapRecord', 'get_tag_content',
                        record.line, {'tag': 'description'})
                    # A record without a description cannot be enriched.
                    if description == None or description.strip() == '':
                        return ''
                    description = description.strip()
                    # The description is '#'-separated; parts [0], [2]
                    # and [1] feed different knowledge-graph arguments.
                    description_list = description.split('#')
                    print description
                    kb_str = kg.getKnowledgeGraph(record.get_title().strip(),
                                                  description_list[0], '',
                                                  description_list[2],
                                                  description_list[1])
                    print kb_str
                    # Store the record without its description tag but
                    # with the knowledge-graph string appended.
                    new_lines_record.append(
                        Record(
                            line.replace('\n', '').replace(
                                'description:' + description, '') + ' ' + kb_str))
            # Only single-record files are written back to disk.
            if len(new_lines_record) == 1:
                url = utils.output2Disk(new_lines_record, 'main', 'exclusive')
                if url != '':
                    return url
                else:
                    return ''
    return ''