def post(self):
    response = ""
    try:
        # Parameters are a form dict, no file upload
        if self.data_type == "data" and self.upload_file == "":
            response = self.session.post(url=self.url, data=self.parameter)
        # Parameters are a form dict, with a file upload
        elif self.data_type == "data" and self.upload_file != "":
            response = self.session.post(url=self.url, data=self.parameter, files=self.upload_file)
        # Parameters are JSON, with a file upload
        elif self.data_type == "json" and self.upload_file != "":
            response = self.session.post(url=self.url, json=self.parameter, files=self.upload_file)
        # Parameters are JSON, no file upload
        elif self.data_type == "json" and self.upload_file == "":
            response = self.session.post(url=self.url, json=self.parameter)
        log.debug("POST request succeeded, request parameters: {}".format(self.parameter))
        log.info("POST request succeeded")
    except Exception as e:
        log.error("POST request failed! Error: {}".format(e))
        log.error("POST request failed! Request parameters: {}".format(self.parameter))
        raise e
    return response.json()
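# A minimal usage sketch of the branching above, driving a plain requests.Session the
# same way the wrapper does. The endpoint, payload and file contents are illustrative
# assumptions, not taken from the original source.
import requests

session = requests.Session()
url = "https://httpbin.org/post"                     # assumed demo endpoint
payload = {"user": "demo"}

form_resp = session.post(url=url, data=payload)      # data_type == "data", no upload
json_resp = session.post(url=url, json=payload)      # data_type == "json", no upload
file_resp = session.post(url=url, data=payload,
                         files={"file": ("report.txt", b"hello")})  # with an upload
print(form_resp.json()["form"], json_resp.json()["json"], file_resp.json()["files"])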
def test_is_balanced_tree(self):
    perfect_tree = BinaryTreeFactory.create_perfect_tree()
    log.debug(perfect_tree)
    is_perfect_tree_balanced = is_balanced_tree_ver1(perfect_tree.get_root())
    self.assertTrue(is_perfect_tree_balanced)

    complete_tree = BinaryTreeFactory.create_complete_tree()
    log.debug(complete_tree)
    is_complete_tree_balanced = is_balanced_tree_ver1(complete_tree.get_root())
    self.assertTrue(is_complete_tree_balanced)
def real_data_handler(self, name, key, block, length):
    """Real-time API event handler.

    :param name: real name
    :param key: real key
    :param block: data block
    :param length: length of data
    :return: void
    """
    log.debug('[api] receive real: {}'.format(str(block)))
    self._real_event_handler(block)
def test_is_unbalanced_tree(self):
    balanced_tree = BinaryTreeFactory.create_balanced_tree()
    log.debug(balanced_tree)
    is_balanced_tree_balanced = is_balanced_tree_ver1(balanced_tree.get_root())
    self.assertTrue(is_balanced_tree_balanced)

    unbalanced_tree = BinaryTreeFactory.create_unbalanced_tree()
    log.debug(unbalanced_tree)
    is_unbalanced_tree_balanced = is_balanced_tree_ver1(unbalanced_tree.get_root())
    self.assertFalse(is_unbalanced_tree_balanced)
def test_simple_graph(self):
    graph = GraphFactory.create_graph()
    count = len(graph.get_nodes())
    # Attach a random number of edges to every node
    for node in graph.get_nodes():
        max_edge_per_node = random.randint(0, count)
        for _ in range(max_edge_per_node):
            node.add_edges([(graph.get_random_node().name, 0)])
    is_directed_graph_ver1(graph)
    log.debug(graph)
    self.assertTrue(True)
def create_unbalanced_tree():
    """Tree: {None: 4}{4: 2}{4: 6}{2: 1}{6: 5}{6: 7}{7: 8}{8: 9}"""
    tree = Tree()
    log.debug(tree.get_root())
    tree.insert(Node(4, parent_name=None))
    tree.insert(Node(2, parent_name=4))
    tree.insert(Node(6, parent_name=4))
    tree.insert(Node(1, parent_name=2))
    tree.insert(Node(5, parent_name=6))
    tree.insert(Node(7, parent_name=6))
    tree.insert(Node(8, parent_name=7))
    tree.insert(Node(9, parent_name=8))
    return tree
def write_error_response(self, status, reason, message, headers={}):
    log.info('write_error_response(%d,%s,%s) enter', status, reason, message)
    # Status code and reason
    self.set_status(status, reason)
    # Set headers
    for i in headers:
        self.set_header(i, headers[i])
    log.debug('error body:%s', message)
    self.write(message)
    self.finish()
    log.info('write_error_response(%d,%s,%s) out', status, reason, message)
def parse(datafile):
    # Read data from the current file
    data = open(datafile, 'r').readlines()
    source = SourceFile(datafile)
    # Remove the 3 descriptive header lines at the beginning of the file
    data.pop(0)
    data.pop(0)
    data.pop(0)
    log.debug('<%i> %s' % (len(data), source.filename))
    for data_row in data:
        source.nete500ms_data.append(NeTe500Ms.parse_line(data_row))
    return [source]
def parse(datafile):
    # Read data from the current file
    data_lines = open(datafile, 'r').readlines()
    # Pair up consecutive lines: each record spans two lines
    data = []
    for i in range(0, len(data_lines) // 2):
        data.append((data_lines[2 * i], data_lines[2 * i + 1]))
    source = SourceFile(datafile)
    year = source.filename[0:4]
    day_of_year = source.filename[4:7]
    log.debug('<%i> %s' % (len(data), source.filename))
    for data_row in data:
        source.ntv2s_data.append(NTV2s.parse_line(data_row))
    return [source]
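# Standalone illustration of the pairing done above (the sample lines are assumed, not
# taken from the original data files); integer division keeps the range() argument an
# int under Python 3.
lines = ['header a\n', 'values a\n', 'header b\n', 'values b\n']
pairs = [(lines[2 * i], lines[2 * i + 1]) for i in range(len(lines) // 2)]
print(pairs)  # [('header a\n', 'values a\n'), ('header b\n', 'values b\n')]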
def handle(self, method):
    log.debug('HttpServer.handle enter')
    self.print_request()
    ret, status, reason, body = True, 200, 'OK', ''
    if self.check_requests_times() == False:
        self.write_error_response(403, 'requests too fast', 'requests too fast')
        return
    ret, status, reason, body = self.init_request()
    if ret == False:
        self.write_error_response(status, reason, body)
        return
    ret, status, reason, body = self.handle_request()
    if ret == False:
        self.write_error_response(status, reason, body)
        return
    self.write_response(status, reason, body)
    log.debug('HttpServer.handle normal out; error paths return earlier')
def fid_data_handler(self, rid, block, length):
    """FID API handler.

    :param rid: request id
    :param block: data block
    :param length: length of data
    :return: void
    """
    # The code below is reference processing logic for illustration, not actual business logic.
    count = self.get_fid_output_count(rid)
    log.debug('[api] received data count: {}'.format(str(count)))
    responses = []
    for i in range(0, count):
        response = {
            'TIME': self.get_fid_output_data(rid, '8', i),
            'DATE': self.get_fid_output_data(rid, '9', i)
        }
        responses.append(response)
    log.debug('[api] response output: {}'.format(str(responses)))
def parse(datafile):
    # Read data from the current file
    data = open(datafile, 'r').readlines()
    source = SourceFile(datafile)
    # Remove the 2 descriptive header lines at the beginning of the file
    data.pop(0)
    data.pop(0)
    year = source.filename[0:4]
    day_of_year = source.filename[4:7]
    log.debug('<%i> %s' % (len(data), source.filename))
    for data_row in data:
        source.nt1s_data.append(NT1s.parse_line(data_row, year, day_of_year))
    return [source]
def write_response(self, status, reason, res_body='', headers={}):
    log.debug('write_response(%d,%s) enter', status, reason)
    self.set_status(status, reason)
    for i in headers:
        self.set_header(i, headers[i])
    # Drop Last-Modified
    self.set_header('Last-Modified', '')
    # Content-Type
    if '' != res_body and 'Content-Type' not in headers:
        self.set_header('Content-Type', 'text/plain')
    # Body
    if '' != res_body:
        self.write(res_body)
    # else:
    #     self.write('test string')
    log.debug('write_response(%d,%s) out', status, reason)
def make_request(self, uri='/', params=None):
    # Copy params so the caller's dict (or a shared default) is never mutated
    params = dict(params or {})
    headers = {'User-Agent': 'TubeSync'}
    token = self.object.loaded_options['token']
    params['X-Plex-Token'] = token
    base_parts = urlsplit(self.object.url)
    qs = urlencode(params)
    url = urlunsplit((base_parts.scheme, base_parts.netloc, uri, qs, ''))
    if self.object.verify_https:
        log.debug(f'[plex media server] Making HTTP GET request to: {url}')
        return requests.get(url, headers=headers, verify=True, timeout=self.TIMEOUT)
    else:
        # If not validating SSL: this is likely an internal or private network, Plex
        # issues its certs as *.hash.plex.direct, and the warning would never be
        # sensibly seen in the HTTPS logs, so hide it
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return requests.get(url, headers=headers, verify=False, timeout=self.TIMEOUT)
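# Standalone sketch of the URL assembly above, using only the standard library; the
# server address and token are assumed example values, not taken from the source.
from urllib.parse import urlsplit, urlunsplit, urlencode

base_parts = urlsplit('https://plex.example.com:32400')      # assumed Plex base URL
qs = urlencode({'X-Plex-Token': 'abc123'})                   # assumed token
print(urlunsplit((base_parts.scheme, base_parts.netloc, '/library/sections', qs, '')))
# -> https://plex.example.com:32400/library/sections?X-Plex-Token=abc123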
def merge_pair(mp1, mp2=None):
    """Merge a pair of MeasurementPoints.

    Check whether the point has a neighbour close enough in time (at most a few
    seconds apart). If it does not, keep the original point's values; otherwise
    take the mean of this point and its neighbour.

    Keyword arguments:
    mp1 -- 'nacs' measurement point conjuncted with 'wats'
    mp2 -- neighbour measurement point. Default 'None' means the neighbour does not exist

    Return: mp1
    """
    def find_value_model(mp, type, level=1, device='nacs'):
        """Find the measurement from mp2 conjuncted with a measurement from mp1.

        Keyword arguments:
        mp -- measurement point (mp2)
        type -- type of the measurement from mp1
        level -- level of the measurement from mp1
        device -- device of the measurement from mp1

        Return: Measurement object conjuncted with the selected value
        """
        for value_model in mp.data:
            if value_model.type == type and value_model.level == level and value_model.device == device:
                return value_model
        return None

    update = []
    if not mp2 or (mp1.datetime.python_type() - mp2.datetime.python_type()).seconds > 3:
        if not mp2:
            log.debug("[wats:%i:%s] edge point does not exist" % (mp1.id, str(mp1.datetime)))
        else:
            log.debug("[wats:%i:%s]&[wats:%i:%s] are too far apart in time" %
                      (mp1.id, str(mp1.datetime), mp2.id, str(mp2.datetime)))
        for measurement in mp1.data:
            if measurement.device == 'nacs':
                nm = Measurement(measurement)
                nm.level = 2
                update.append(nm)
    else:
        log.debug("[wats:%i:%s]&[wats:%i:%s] are going to be resampled" %
                  (mp1.id, str(mp1.datetime), mp2.id, str(mp2.datetime)))
        for measurement in mp1.data:
            # Look up the matching measurement on the neighbour point
            ms = find_value_model(mp2, measurement.type)
            nm = Measurement(measurement)
            nm.level = 2
            nm.value = (nm.value + ms.value) / 2
            nm.error = (nm.error + ms.error) / 2
            nm.correction = (nm.correction + ms.correction) / 2
            update.append(nm)
    mp1.data.extend(update)
    session_instance.add(mp1)
    session_instance.commit()
    return mp1
def test_simple_array(self):
    arr_b = ArrayListFactory.create_array_list()
    arr_a = ArrayList(['A', 'A', 'A', 'A'], max_size=len(arr_b) + 4)
    log.debug(arr_a)
    log.debug(arr_b)
    self.assertTrue(True)
def handle_request(self):
    self.write_error_response(404, 'Not Found', 'Not Found, long live')
    log.debug('handle_request() out')
def check_requests_times(self):
    log.debug('HttpServer.check_requests_times enter TODO')
    # TODO
    return True
def test_perfect_tree(self):
    tree = BinaryTreeFactory.create_perfect_tree()
    log.debug(tree)
    self.assertTrue(True)
def get(self):
    log.debug('HttpServer.get enter')
    self.handle('GET')
    log.debug('HttpServer.get out')
def post(self):
    log.debug('HttpServer.post enter')
    self.handle('POST')
    log.debug('HttpServer.post out')
def test_complete_tree(self):
    tree = BinaryTreeFactory.create_complete_tree()
    log.debug(tree)
    self.assertTrue(True)
def media_post_save(sender, instance, created, **kwargs):
    # Triggered after media is saved
    cap_changed = False
    can_download_changed = False
    # Reset the skip flag if the download cap has changed and the media has not
    # already been downloaded
    if not instance.downloaded:
        max_cap_age = instance.source.download_cap_date
        published = instance.published
        if not published:
            if not instance.skip:
                log.warn(
                    f'Media: {instance.source} / {instance} has no published date '
                    f'set, marking to be skipped')
                instance.skip = True
                cap_changed = True
            else:
                log.debug(
                    f'Media: {instance.source} / {instance} has no published date '
                    f'set but is already marked to be skipped')
        else:
            if max_cap_age:
                if published > max_cap_age and instance.skip:
                    # Media was published after the cap date but is set to be skipped
                    log.info(
                        f'Media: {instance.source} / {instance} has a valid '
                        f'publishing date, marking to be unskipped')
                    instance.skip = False
                    cap_changed = True
                elif published <= max_cap_age and not instance.skip:
                    log.info(
                        f'Media: {instance.source} / {instance} is too old for '
                        f'the download cap date, marking to be skipped')
                    instance.skip = True
                    cap_changed = True
            else:
                if instance.skip:
                    # Media marked to be skipped but source download cap removed
                    log.info(
                        f'Media: {instance.source} / {instance} has a valid '
                        f'publishing date, marking to be unskipped')
                    instance.skip = False
                    cap_changed = True
    # Recalculate the "can_download" flag, this may
    # need to change if the source specifications have been changed
    if instance.metadata:
        if instance.get_format_str():
            if not instance.can_download:
                instance.can_download = True
                can_download_changed = True
        else:
            if instance.can_download:
                instance.can_download = False
                can_download_changed = True
    # Save the instance if any changes were required
    if cap_changed or can_download_changed:
        post_save.disconnect(media_post_save, sender=Media)
        instance.save()
        post_save.connect(media_post_save, sender=Media)
    # If the media is missing metadata schedule it to be downloaded
    if not instance.metadata:
        log.info(f'Scheduling task to download metadata for: {instance.url}')
        verbose_name = _('Downloading metadata for "{}"')
        download_media_metadata(str(instance.pk), priority=10,
                                verbose_name=verbose_name.format(instance.pk),
                                remove_existing_tasks=True)
    # If the media is missing a thumbnail schedule it to be downloaded
    if not instance.thumb_file_exists:
        instance.thumb = None
    if not instance.thumb:
        thumbnail_url = instance.thumbnail
        if thumbnail_url:
            log.info(
                f'Scheduling task to download thumbnail for: {instance.name} '
                f'from: {thumbnail_url}')
            verbose_name = _('Downloading thumbnail for "{}"')
            download_media_thumbnail(str(instance.pk), thumbnail_url,
                                     queue=str(instance.source.pk),
                                     priority=10,
                                     verbose_name=verbose_name.format(instance.name),
                                     remove_existing_tasks=True)
    # If the media has not yet been downloaded schedule it to be downloaded
    if not instance.media_file_exists:
        instance.downloaded = False
        instance.media_file = None
    if (not instance.downloaded and instance.can_download and not instance.skip
            and instance.source.download_media):
        delete_task_by_media('sync.tasks.download_media', (str(instance.pk), ))
        verbose_name = _('Downloading media for "{}"')
        download_media(str(instance.pk), queue=str(instance.source.pk),
                       priority=15,
                       verbose_name=verbose_name.format(instance.name),
                       remove_existing_tasks=True)
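# Hedged sketch of the disconnect/save/reconnect pattern used above (generic helper,
# not part of the original source; it needs a configured Django project to run):
# detaching the handler while saving prevents the post_save signal from re-invoking
# media_post_save on the instance it is already processing.
from django.db.models.signals import post_save

def save_without_post_save(instance, handler, sender):
    # Temporarily detach the handler so instance.save() does not recurse into it
    post_save.disconnect(handler, sender=sender)
    try:
        instance.save()
    finally:
        post_save.connect(handler, sender=sender)

# Usage with assumed names: save_without_post_save(media, media_post_save, Media)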