def add_collection(self, path, *args):
    """Add a collection to the namespace.  For instance::

        inst.add_collection('/c1')

    :param string path: path under which to add the collection
    :param args[0]: :py:class:`Collection` class to add, if present
    :rtype: the :py:class:`Collection` which was added
    :raises: :py:class:`SmapException` if the parent is not a collection,
     or the path exists.
    """
    if len(args) > 0 and ICollection.providedBy(args[0]):
        collection = args[0]
    elif len(args) == 0:
        collection = Collection(path, self)
    else:
        raise SmapException("add_collection: wrong number of arguments")

    path = util.split_path(path)
    if len(path) > 0:
        parent = self.get_collection(util.join_path(path[:-1]))
        if not parent:
            raise SmapException("add_collection: parent is not collection!")
        parent.add_child(path[-1])

    if util.join_path(path) in self.OBJS_PATH:
        raise SmapException("add_collection: path " + str(path) + " exists!")
    self.OBJS_PATH[util.join_path(path)] = collection

    if not self.loading:
        self.reports.update_subscriptions()
    return collection
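# A minimal usage sketch for add_collection, assuming a SmapInstance named
# "inst" has already been constructed (the instance name and paths are
# illustrative, not taken from the snippet above).
c1 = inst.add_collection('/c1')            # create an empty Collection at /c1
c2 = inst.add_collection('/c1/nested')     # parent /c1 must already exist
# an existing Collection object may also be passed explicitly:
# inst.add_collection('/c3', Collection('/c3', inst))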
def test_join_path():
    s = '\n'
    s += "'./dir1', 'a.txt' = " + util.join_path('./dir1', 'a.txt') + '\n'
    s += "'./dir1/', 'a.txt' = " + util.join_path('./dir1/', 'a.txt') + '\n'
    s += "'./dir1', '/a.txt' = " + util.join_path('./dir1', '/a.txt') + '\n'
    s += "'./dir1/', '/a.txt' = " + util.join_path('./dir1/', '/a.txt') + '\n'
    return s
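# The snippets in this collection come from different projects and use several
# different join_path helpers: sMAP's segment-list version, the filesystem-style
# two-argument version exercised above, GitPython's ref-path version, and an
# application-specific (prefix, name, extension) version.  The sketch below is
# only an assumption of what the sMAP-style split_path/join_path pair in util
# is expected to do; it is not taken from any of the snippets.
def _sketch_split_path(path):
    # '/c1/nested' -> ['c1', 'nested']
    return [seg for seg in path.split('/') if seg]

def _sketch_join_path(segments):
    # ['c1', 'nested'] -> '/c1/nested'
    return '/' + '/'.join(segments)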
def add_timeseries(self, path, *args, **kwargs):
    """Add a timeseries to the smap server at the given path.  This
    will generate a UUID for the timeseries.

    direct form
    :param path: a Timeseries instance

    simple form
    :param args[0]: a uuid instance, or a key used to generate a uuid by
     combining it with the root uuid
    :param args[1]: and kwargs are arguments passed to the Timeseries
     constructor.  Therefore you have to include at least the UnitofMeasure

    :param boolean replace: (kwarg) replace an existing timeseries at that
     path instead of throwing an exception
    :param boolean recurse: (kwarg) recursively create parent collections
     instead of throwing an exception.  Default is True.
    :raises: :py:class:`SmapException` if the parent isn't a collection or
     the path already exists.
    """
    replace = kwargs.pop('replace', False)
    recurse = kwargs.pop('recurse', True)
    klass = kwargs.pop('klass', Timeseries)
    if len(args) == 0 or \
            (not ITimeseries.providedBy(args[0]) and not IActuator.providedBy(args[0])):
        if len(args) == 2:
            if not isinstance(args[0], uuid.UUID):
                id = self.uuid(args[0], namespace=kwargs.get('namespace', None))
            else:
                id = args[0]
            args = args[1:]
        elif len(args) == 1:
            id = self.uuid(util.norm_path(path), kwargs.get('namespace', None))
        else:
            id = self.uuid(util.norm_path(path))
            # raise SmapException("SmapInstance.add_timeseries may only be called "
            #                     "with two or three arguments")
        kwargs.pop('namespace', None)
        timeseries = klass(id, *args, **kwargs)
        if id != args[0]:
            setattr(timeseries, "key", args[0])
    else:
        timeseries = args[0]

    path = util.split_path(path)
    if recurse:
        self._add_parents(path)
    parent = self.get_collection(util.join_path(path[:-1]))

    if not replace and util.join_path(path) in self.OBJS_PATH:
        raise SmapException("add_timeseries: path " + str(path) + " exists!")
    if not parent:
        raise SmapException("add_timeseries: parent is not a collection!")
    parent.add_child(path[-1])

    # place the new timeseries into the uuid and path tables
    self.OBJS_UUID[timeseries['uuid']] = timeseries
    self.OBJS_PATH[util.join_path(path)] = timeseries
    timeseries.inst = self
    setattr(timeseries, 'path', util.join_path(path))

    if not self.loading:
        self.reports.update_subscriptions()
    return timeseries
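# A minimal usage sketch for the documented "simple form", assuming a
# SmapInstance "inst"; the path, key, and unit strings are illustrative only.
ts = inst.add_timeseries('/c1/sensor0', 'sensor0', 'V')   # key + unit of measure
# or with an explicit uuid, and without creating parent collections:
# ts = inst.add_timeseries('/c1/sensor1', uuid.uuid1(), 'V', recurse=False)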
def _iter_items(cls, repo, common_path=None):
    if common_path is None:
        common_path = cls._common_path_default
    rela_paths = set()

    # walk loose refs
    # Currently we do not follow links
    for root, dirs, files in os.walk(join_path_native(repo.git_dir, common_path)):
        if 'refs/' not in root:  # skip non-refs subfolders
            refs_id = [i for i, d in enumerate(dirs) if d == 'refs']
            if refs_id:
                dirs[0:] = ['refs']
        # END prune non-refs folders

        for f in files:
            abs_path = to_native_path_linux(join_path(root, f))
            rela_paths.add(abs_path.replace(to_native_path_linux(repo.git_dir) + '/', ""))
        # END for each file in root directory
    # END for each directory to walk

    # read packed refs
    for sha, rela_path in cls._iter_packed_refs(repo):
        if rela_path.startswith(common_path):
            rela_paths.add(rela_path)
        # END relative path matches common path
    # END packed refs reading

    # return paths in sorted order
    for path in sorted(rela_paths):
        try:
            yield cls.from_path(repo, path)
        except ValueError:
            continue
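# A usage sketch, not taken from the snippet above: in GitPython the private
# _iter_items backs the public Reference.iter_items classmethod, so refs are
# normally enumerated like this.  The repository path is a placeholder.
from git import Repo, Reference

repo = Repo('/path/to/repo')
for ref in Reference.iter_items(repo):
    print(ref.path)   # e.g. refs/heads/master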
def main():
    log.info("Running step 1")
    input_folder = join_path(os.getcwd(), paths_dic['orig_shapes'])
    orig_shapes_names = [f for f in listdir(input_folder)
                         if isfile(join(input_folder, f)) and f.endswith('.bmp')]
    output_folder = join_path(os.getcwd(), paths_dic['prepared_for_mfd'])
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    for orig_shape_name in orig_shapes_names:
        run_funcs(orig_shape_name, input_folder, output_folder)
    log.info("Images prepared for mfd saved to %s" % output_folder)
    log.info("----------------- Finished Successfully -----------------")
class InstanceResource(resource.Resource):
    """Resource which maps HTTP requests to requests on the sMAP instance.
    """
    def __init__(self, inst):
        self.inst = inst
        resource.Resource.__init__(self)

    isLeaf = True

    def render_GET(self, request):
        request.setHeader('Content-type', 'application/json')
        # assemble the results
        try:
            obj = self.inst.lookup(util.join_path(request.postpath))
        except Exception, e:
            setResponseCode(request, e, 500)
            request.finish()
            return server.NOT_DONE_YET  # response already finished

        if obj == None:
            request.setResponseCode(404)
            return ("No such timeseries or collection: " +
                    util.join_path(request.postpath) + '\n')
        else:
            d = defer.maybeDeferred(core.SmapInstance.render_lookup, request, obj)
            d.addCallback(lambda x: self.send_reply(request, x))
            d.addErrback(lambda x: self.send_error(request, x))
            return server.NOT_DONE_YET
def explore(item, path):
    if not 'Contents' in item:
        item.dirty = True
    else:
        for ps in item['Contents']:
            newpath = path + [ps]
            explore(self.inst.lookup(util.join_path(newpath)), newpath)
def debugging_draw_points(size, image_name, touch_points, medial_axis, sampled_points, debug_path):
    util.create_new_path(debug_path)
    (width, height) = size
    new_img = Image.new('RGB', (width, height), "gray")
    draw_points(new_img, touch_points, config.colors_dic['blue'])
    draw_points(new_img, medial_axis, config.colors_dic['red'])
    draw_points(new_img, sampled_points, config.colors_dic['white'])
    new_img.save(join_path(debug_path, image_name))
def other_fee_open_slot(self):
    self.other_fee_ui.show()
    path = util.join_path(variables.pre_path_other_fee,
                          variables.file_name_other_fee,
                          variables.postfix_other_fee)
    lines = self.model.read(path)
    if not lines:
        return
    view_data = self.other_fee_translator.stored_2_view(lines)
    self.other_fee_view.write(self.other_fee_ui, view_data)
def render_PUT(self, request):
    request.setHeader('Content-type', 'application/json')
    # you can only PUT actuators
    obj = self.inst.lookup(util.join_path(request.postpath))
    d = defer.maybeDeferred(core.SmapInstance.render_lookup, request, obj)
    d.addCallback(lambda x: self.send_reply(request, x))
    d.addErrback(lambda x: self.send_error(request, x))
    return server.NOT_DONE_YET
def render_PUT(self, request):
    request.setHeader('Content-type', 'application/json')
    # you can only PUT actuators
    obj = self.inst.lookup(util.join_path(request.postpath),
                           pred=IActuator.providedBy)
    d = defer.maybeDeferred(core.SmapInstance.render_lookup, request, obj)
    d.addCallback(lambda x: self.send_reply(request, x))
    d.addErrback(lambda x: self.send_error(request, x))
    return server.NOT_DONE_YET
def scrape():
    login_data = get_login_form()
    # initialize
    registrations_data = {}
    with requests.Session() as session:
        login_url = util.join_path(ISA_BASE_URL, ISA_LOGIN_ACTION)
        session.post(login_url, data=login_data)

        report_url = util.join_path(ISA_BASE_URL, ISA_REPORTS_INSCRIPTIONS_COURS)
        report_data = get_report_form()
        response = session.get(report_url, data=report_data)
        soup = bsoup(response.text, 'html.parser')

        # get all ids and course names below
        # "Cliquez sur une des matières pour avoir les inscriptions"
        # (i.e. "Click on one of the subjects to see the registrations")
        ww_x_MAT = [(link.get('onclick')[32:-42], link.text.strip())
                    for link in soup.find_all(class_='ww_x_MAT')]
        ww_x_MAT_timeouts = []
        course_report_url = util.join_path(ISA_BASE_URL, ISA_COURSE_REPORT)

        # loop over course ids and names
        for x_MAT, course_name in ww_x_MAT:
            try:
                print(f"scraping {x_MAT}: {course_name}")
                report_data = get_report_form(x_MAT)
                response = session.get(course_report_url, data=report_data, timeout=30)
                soup = bsoup(response.text, 'html.parser')
                print(soup.prettify())
                if course_name not in registrations_data:
                    # sometimes the course name is duplicated
                    registrations_data[course_name] = {}
                get_isa_course_report(registrations_data[course_name], soup)
            except requests.exceptions.Timeout:
                ww_x_MAT_timeouts.append((x_MAT, course_name))
                continue
def open_statistics_file(stat_path):
    util.create_new_path(stat_path)
    file_name = "statistics_%s_%s_%s_%s_%s" % (
        str(config.curr_grid_size),
        str(config.curr_clicks_threshold),
        str(config.curr_sampled_points),
        str(config.curr_number_of_iterations),
        str(config.curr_radius_threshold))
    stat_file = XlsxFile(join_path(stat_path, file_name))
    stat_file.create_file()
    headers = config.stat_headers
    stat_file.write_to_sheet(headers)
    return stat_file
def lookup(self, id, pred=None):
    """Retrieve an object in the resource hierarchy by path or uuid.

    If *id* is a string not starting with ``/``, it will be passed to the
    :py:class:`uuid.UUID` constructor; otherwise it will be treated as a
    pathname.  *pred* is an optional predicate which can be used to test
    the result.
    """
    if util.is_string(id):
        path = util.split_path(id)
        if len(path) > 0 and path[-1][0] == "+":
            return self._lookup_r(util.join_path(path[:-1]), pred=pred)
        else:
            obj = self.OBJS_PATH.get(util.join_path(path), None)
    elif isinstance(id, uuid.UUID):
        return self.OBJS_UUID.get(id, None)
    else:
        obj = None

    if not pred or pred(obj):
        return obj
    else:
        return None
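# A minimal usage sketch for lookup, assuming a SmapInstance "inst" with a
# timeseries already added at /c1/sensor0 (names are illustrative).
ts = inst.lookup('/c1/sensor0')                          # by path
same_ts = inst.lookup(ts['uuid'])                        # by uuid.UUID instance
col = inst.lookup('/c1', pred=ICollection.providedBy)    # with a predicate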
def main():
    log.info("Running step 3")
    orig_folder = join_path(os.getcwd(), paths_dic['orig_shapes'])
    orig_shapes_names = [f for f in listdir(orig_folder)
                         if isfile(join(orig_folder, f)) and f.endswith('.bmp')]
    before_mfd_folder = join_path(os.getcwd(), paths_dic['prepared_for_mfd'])
    after_mfd_folder = join_path(os.getcwd(), paths_dic['after_mfd'])
    output_folder = join_path(os.getcwd(), paths_dic['medial_axised'])
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    for orig_shape_name in orig_shapes_names:
        run(orig_shape_name, orig_folder, before_mfd_folder, after_mfd_folder, output_folder)
    log.info("----------------- Finished Successfully -----------------")
def __salary_list__(self, data):
    xls_data = []
    xls_head_data = []
    xls_head_data.append('序')  # '序' = serial-number column header
    xls_head_data.extend(variables.string_sum_items)
    xls_data.append(xls_head_data)
    i = 0
    total = 0
    for data1 in data:
        i += 1
        items = list(data1.items())
        items.sort(key=functools.cmp_to_key(util.sum_sort_cmp))
        out_data1 = []
        out_data1.append(i)
        out_data1.extend([item[1] for item in items])
        total += out_data1[-1]
        xls_data.append(out_data1)
    row_total = []
    row_total.append(variables.string_total)
    row_total.append(total)
    xls_data.append(row_total)
    path = util.join_path(variables.pre_path_sum_xsl, variables.string_salary_table, 'xls')
    self.xsl_model.single_array_write(path, xls_data, variables.string_salary_table)

    xls_data_salary_sheet = []
    header = xls_data[0][1:]
    j = 1
    num_line = len(xls_data)
    while j < num_line - 1:
        xls_data_salary_sheet.append(header)
        xls_data_salary_sheet.append(xls_data[j][1:])
        xls_data_salary_sheet.append([])
        j += 1
    path = util.join_path(variables.pre_path_sum_xsl, variables.string_salary_sheet, 'xls')
    return self.xsl_model.single_array_write(path, xls_data_salary_sheet, variables.string_salary_sheet)
def run(orig_shape_name, orig_folder, before_mfd_folder, after_mfd_folder, output_folder):
    log.info("Processing %s" % orig_shape_name)
    orig_shape_path = join_path(orig_folder, orig_shape_name)
    before_mfd_path = join_path(before_mfd_folder, util.get_binary_image_name(orig_shape_name))
    after_mfd_image_name = util.get_image_name_clean_after_mfd(orig_shape_name)
    after_mfd_path = join_path(after_mfd_folder, after_mfd_image_name)
    output_image_name = after_mfd_image_name
    orig_shape = util.read_image(orig_shape_path)
    shape_before_mfd = util.read_image(before_mfd_path)
    shape_after_mfd = util.read_image(after_mfd_path)
    log.info("Overriding shape frame: red -> white")
    clean_img = override_frame(shape_before_mfd, shape_after_mfd)
    log.info("Overriding outside shape red lines: red -> black")
    clean_img = clean_outside_red_lines(orig_shape, clean_img)
    log.info("Saving clean image")
    util.save_on_path(clean_img, output_image_name, output_folder)
def main():
    log.info("Running step 2")
    mfd_folder = util.join_path(os.getcwd(), config.paths_dic['mfd'])
    before_mfd_folder = util.join_path(os.getcwd(), config.paths_dic['prepared_for_mfd'])
    output_folder = util.join_path(os.getcwd(), config.paths_dic['after_mfd'])
    os.chdir(mfd_folder)
    shapes_names = [f for f in listdir(before_mfd_folder)
                    if isfile(join(before_mfd_folder, f)) and f.endswith('.bmp')]
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    for shape_name in shapes_names:
        run(before_mfd_folder, shape_name, output_folder)
    log.info("Images after mfd saved to %s" % output_folder)
    log.info("----------------- Finished Successfully -----------------")
def index_changed_slot(self, index):
    if self.lastComboxIdx != 0:
        last_truck_name = self.view.truck_name(self.lastComboxIdx, self.ui)
        self.__save_from_view_2_stored__(last_truck_name)
    self.ui.tableWidget.setEnabled(index != 0)
    self.lastComboxIdx = index
    current_truck_name = self.view.current_truck_name(self.ui)
    path = util.join_path(variables.pre_path__product_value_stored, current_truck_name, r'pv')
    lines = self.model.read(path)
    if lines:
        lines = self.translator.stored_2_view(lines)
        self.view.write(lines, self.ui)
    else:
        self.view.clear_table_text(self.ui)
def run_funcs(orig_shape_name, folder_input_path, folder_output_path):
    shape_input_path = join_path(folder_input_path, orig_shape_name)
    output_image_name = 'binary_%s' % orig_shape_name
    log.info("Reading %s" % orig_shape_name)
    image = util.read_image(shape_input_path)
    log.info("Improving shape coloring")
    image = improve_coloring(image)
    log.info("Calculating boundaries")
    boundaries = calc_boundaries(image)
    log.info("Marking boundary")
    image = mark_boundary(image, boundaries, config.colors_dic['black'])
    log.info("Building binary")
    image = whiten(image)
    image = replace_white_and_black(image)
    util.save_on_path(image, output_image_name, folder_output_path)
    log.info("New shape %s saved on %s" % (output_image_name, folder_output_path))
def _add(self, *args):
    """Add a new reading to this timeseries.  This version must only be
    called from the :py:mod:`twisted` main loop; *i.e.* from a callback
    added with ``reactor.callFromThread()``

    Can be called with 1, 2, or 3 arguments.  The forms are

    * ``_add(value)``
    * ``_add(time, value)``
    * ``_add(time, value, seqno)``

    :raises SmapException: if the value's type does not match the stream
     type, or was called with an invalid number of arguments.
    """
    seqno = None
    if len(args) == 1:
        time = util.now()
        if self.milliseconds:
            time *= 1000
        value = args[0]
    elif len(args) == 2:
        time, value = args
    elif len(args) == 3:
        time, value, seqno = args
    else:
        raise SmapException("Invalid add arguments: must be (value), "
                            "(time, value), or (time, value, seqno)")

    # note that we got data now
    self.inst.statslog.mark()
    time = int(time)
    if not self.milliseconds:
        time *= 1000

    if not self._check_type(value):
        raise SmapException("Attempted to add " + str(value) +
                            " to Timeseries, but " +
                            "the timeseries type is " +
                            self.__getitem__('Properties')['ReadingType'])

    if seqno:
        reading = time, value, seqno
    else:
        reading = time, value
    self["Readings"].append(reading)
    if not hasattr(self, 'inst'):
        return

    # if a timeseries is dirty, we need to republish all of its
    # metadata before we publish it so stream is right.  some of
    # this may have already been published, in which case it won't
    # actually do anything.
    if self.dirty:
        split_path = util.split_path(getattr(self, 'path'))
        for i in xrange(0, len(split_path)):
            path_seg = util.join_path(split_path[:i])
            self.inst.reports.publish(path_seg, self.inst.get_collection(path_seg))
        rpt = dict(self)
        rpt['Readings'] = [reading]
        self.inst.reports.publish(getattr(self, 'path'), rpt)
        self.dirty = False
    else:
        # publish a stripped-down Timeseries object
        self.inst.reports.publish(getattr(self, 'path'),
                                  {'uuid': self['uuid'],
                                   'Readings': [reading]})
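# A minimal sketch of the three documented call forms, assuming "ts" is a
# Timeseries already attached to an instance and that these calls run on the
# twisted reactor thread (values and timestamps are illustrative).
ts._add(1.0)                     # reading stamped with util.now()
ts._add(1361563200, 1.0)         # explicit timestamp
ts._add(1361563200, 1.0, 42)     # explicit timestamp and sequence number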
def run(root_path):
    orig_shapes_path = join_path(root_path, paths_dic['orig_shapes'])
    csv_path = join_path(root_path, paths_dic['csv_files'])
    medial_axis_path = join_path(root_path, paths_dic['medial_axised'])
    heat_maps_path = join_path(root_path, paths_dic['heat_maps'])
    debug_path = join_path(root_path, paths_dic['debug'])
    statistics_path = join_path(root_path, paths_dic['statistics'])
    csv_files = [f for f in listdir(csv_path)
                 if isfile(join(csv_path, f)) and f.endswith('.csv')]
    shapes_dic = config.shapes_dic
    stat_file = open_statistics_file(statistics_path)

    for csv_file in csv_files:
        x_list, y_list = read_csv(join_path(csv_path, csv_file))
        csv_suffix_location = csv_file.find('.csv')
        number = int(csv_file[0:csv_suffix_location])
        if config.run_subset:
            if number not in config.images_subset:
                continue
        image_name = shapes_dic[number]
        log.info('processing shape ' + image_name)
        img = util.read_image(join_path(medial_axis_path,
                                        util.get_image_name_clean_after_mfd(image_name)))
        # list of pairs (points) where touches have been made
        touch_points = list(zip(y_list, x_list))
        # list of pairs (points) of the medial axis
        medial_axis = get_medial_axis(img)
        orig_image = util.read_image(join_path(orig_shapes_path, image_name))
        unf_points = uniform_distribution(orig_image)
        image = plot_bins(img, x_list, y_list)
        hexbin_centers = bins_data(image)

        if config.produce_heat_maps:
            log.info("generating heat map")
            bmp_location = image_name.find('.bmp')
            new_image_name = image_name[0:bmp_location] + '_heat_map.png'
            util.create_new_path(heat_maps_path)
            pl.savefig(join_path(heat_maps_path, new_image_name), bbox_inches='tight', dpi=350)
            pl.close()

        pct_of_touches_in_radius, medial_axis_percentage, pct_of_medial_axis_and_radius, \
            avg_dist_touches_medial_axis, min_avg_dist_rand_points_medial_axis, avg_dist_ratio = \
            [0, 0, 0, 0, 0, 0]

        if config.run_radius_analysis:
            log.info("running radius analysis")
            pct_of_medial_axis_and_radius = analysis.percent_in_range_2(unf_points, medial_axis)
            pct_of_touches_in_radius = analysis.percent_in_range_2(hexbin_centers, medial_axis)

        if config.run_avg_dist_analysis:
            log.info("running avg distance analysis")
            distances_sample = []
            for i in range(config.curr_number_of_iterations):
                sampled_points = take_sample(unf_points)
                distances_sample.append(
                    util.find_average_distance_from_medial_axis(sampled_points, medial_axis))
                if config.debug_images:
                    debugging_draw_points(img.size, image_name, touch_points, medial_axis,
                                          sampled_points, debug_path)
            medial_axis_percentage = float(len(medial_axis) / len(unf_points) * 100)
            avg_dist_touches_medial_axis = \
                util.find_average_distance_from_medial_axis(hexbin_centers, medial_axis)
            min_avg_dist_rand_points_medial_axis = min(distances_sample)
            avg_dist_ratio = min_avg_dist_rand_points_medial_axis / avg_dist_touches_medial_axis

        stat_row = [number, image_name, medial_axis_percentage, pct_of_medial_axis_and_radius,
                    pct_of_touches_in_radius, avg_dist_touches_medial_axis,
                    min_avg_dist_rand_points_medial_axis, avg_dist_ratio,
                    config.curr_grid_size, config.curr_clicks_threshold,
                    config.curr_sampled_points, config.curr_number_of_iterations,
                    config.curr_radius_threshold]
        stat_file.write_to_sheet(stat_row)

    if config.produce_heat_maps:
        log.info("Saved heat maps to %s" % heat_maps_path)
    stat_file.save_workbook()
    log.info("Saved statistics file to %s" % statistics_path)
    if config.debug_images:
        log.info("Saved debugging images to %s" % debug_path)
    log.info('Finished successfully! Exiting...')
    os.chdir(root_path)
class Reader(object):
    """Usage:

    >>> r = Reader()
    >>> r.header = 'relative_path'
    >>> r.footer = 'message'

    Image.size = (width, height)
    """
    DEFAULT_IMAGE_MODE = 'RGB'
    OPACITY_IMAGE_MODE = 'RGBA'
    DEFAULT_WIDTH = 750  # default ios width
    DEFAULT_FORMAT = FILE_TYPE_PNG
    DEFAULT_FONT = join_path('font.{}'.format(FILE_TYPE_TTC))
    DEFAULT_FONT_SIZE = 18
    MARGIN_LEFT = DEFAULT_FONT_SIZE * 1
    MARGIN_RIGHT = DEFAULT_FONT_SIZE * 2
    MARGIN_TOP = 8
    MARGIN_BOTTOM = 8
    RGB_WHITE = (255, 255, 255)
    RGB_BLACK = (0, 0, 0)
    RGBA_OPACITY = (0, 0, 0, 0)

    def __init__(self, width=None, **kwargs):
        super(Reader, self).__init__()
        if not width:
            self.width = self.DEFAULT_WIDTH
        self.footer_height = 60
        self.text_background_color = kwargs.get('text_background_color', self.RGB_WHITE)
        self.font_size = kwargs.get('font_size', self.DEFAULT_FONT_SIZE)
        self.font = kwargs.get('font', self.DEFAULT_FONT)
        self.font_color = kwargs.get('font_color', self.RGB_BLACK)

    def __suggest_width(self, width):
        if width != self.DEFAULT_WIDTH:
            sys.stdout.write(u'suggest width use {}px\n'.format(self.DEFAULT_WIDTH))

    def __combine_imgs(self, *args):
        if not args:
            raise ValueError(u'images must not be empty')
        widths, new_height = [], 0
        for image in args:
            if not image:
                continue
            new_height += image.size[1]
            widths.append(image.size[0])
        assert len(set(widths)) == 1, u'image widths differ'
        width = widths[0]
        self.__suggest_width(width)
        size = (width, new_height)
        combine_img = Image.new(self.OPACITY_IMAGE_MODE, size, self.RGBA_OPACITY)
        paste_height = 0
        for _, image in enumerate(args):
            combine_img.paste(image, (0, paste_height))
            paste_height += image.size[1]
        # TODO <*****@*****.**>> height over the max height
        return combine_img

    @classmethod
    def combine_img(cls, *args):
        return cls.__combine_imgs(*args)

    def __text_to_png(self, text='', font_size=DEFAULT_FONT_SIZE,
                      font_path=DEFAULT_FONT, font_color=RGB_BLACK,
                      background_color=RGB_WHITE, is_header_width=True):
        if not isinstance(text, unicode):
            text = unicode(text, 'UTF-8')
        font = ImageFont.truetype(font_path, font_size)
        width, height = font.getsize(text)
        if is_header_width:
            width = self.width
        height += self.MARGIN_TOP
        sys.stdout.write('text_to_png establish width:{}px,height:{}px\n'.format(width, height))
        image = Image.new(self.DEFAULT_IMAGE_MODE, (width, height), background_color)
        draw = ImageDraw.Draw(image)
        draw.text((self.MARGIN_LEFT, self.MARGIN_TOP), text, font=font, fill=font_color)
        return image

    def __combine_text(self, text='', font_size=DEFAULT_FONT_SIZE,
                       font_path=DEFAULT_FONT, font_color=RGB_BLACK,
                       background_color=RGB_WHITE):
        if not text:
            return None
        texts = text.split('\n')
        word_count = (self.width - self.MARGIN_LEFT - self.MARGIN_RIGHT) / font_size
        print word_count,
        images = []
        text_sections = [self.__chunks(t.strip(), word_count) for t in texts]
        for text_lines in text_sections:
            for t in text_lines:
                image = self.__text_to_png(text=t, font_size=font_size,
                                           font_path=font_path,
                                           font_color=font_color,
                                           background_color=background_color)
                images.append(image)
        return self.__combine_imgs(*images)

    @property
    def text(self):
        return self._text

    @text.setter
    def text(self, text=''):
        self._text = self.__combine_text(text)
        return self

    @text.getter
    def text(self):
        return self._text

    @property
    def header(self):
        return self._header

    @header.setter
    def header(self, header_path):
        self._header = self.__reg_img(header_path)
        self.width = self.header.size[0]
        return self

    @header.getter
    def header(self):
        self.__suggest_width(self.header.size[0])
        return self._header

    @property
    def footer(self):
        return self._footer

    @footer.setter
    def footer(self, text=''):
        size = (self.width, self.footer_height)
        self._footer = Image.new(self.DEFAULT_IMAGE_MODE, size, self.RGB_WHITE)
        return self

    @footer.getter
    def footer(self):
        return self._footer

    # TODO <*****@*****.**> title

    @staticmethod
    def __reg_img(path):
        path = join_path(path)
        return Image.open(path)

    @staticmethod
    def __chunks(text, setp):
        for i in xrange(0, len(text), setp):
            yield text[i:i + setp]
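# A usage sketch following the class docstring; the file name and text are
# placeholders, and how the combined page is produced afterwards is not shown
# by the class itself.
r = Reader()
r.header = 'header.png'              # width is taken from the header image
r.text = u'some article body text'   # rendered to stacked text images
r.footer = 'ignored text'            # footer becomes a blank white strip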
def __save_from_view_2_stored__(self, truck_name):
    data = self.view.read(self.ui)
    lines = self.translator.view_2_stored(data)
    path = util.join_path(variables.pre_path__product_value_stored, truck_name, r'pv')
    self.model.write(lines, path)
def other_fee_save_slot(self):
    lines = self.other_fee_view.read(self.other_fee_ui)
    lines = self.other_fee_translator.view_2_stored(lines)
    path = util.join_path(variables.pre_path_other_fee,
                          variables.file_name_other_fee,
                          variables.postfix_other_fee)
    self.model.write(lines, path)
def _add_parents(self, path):
    for i in xrange(0, len(path)):
        if not self.get_collection(util.join_path(path[:i])):
            self.add_collection(util.join_path(path[:i]))
def run(before_mfd_folder, shape_name, output_folder):
    command = "mfd -m %s -md %s" % (util.join_path(before_mfd_folder, shape_name),
                                    output_folder)
    os.system(command)
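# An alternative sketch, not taken from the snippet above: invoking the same
# command via subprocess avoids shell-quoting problems with paths containing
# spaces.  It assumes the mfd binary accepts the arguments exactly as run()
# passes them.
import subprocess

def run_subprocess(before_mfd_folder, shape_name, output_folder):
    subprocess.call(["mfd", "-m", util.join_path(before_mfd_folder, shape_name),
                     "-md", output_folder])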
def __personal_detail__(self):
    out_data = []
    path_names = util.join_path(variables.names_pre_path,
                                variables.file_name_drivers, r'txt')
    lines_name = self.model.read(path_names)
    lines_name = util.lines_vaild_data(lines_name)
    for driver_name in lines_name:
        out_data1 = {}
        out_data1[variables.string_sum_driver] = driver_name
        xls_data = []
        driver_name = driver_name.strip()
        truck_name = util.truck_name_by_driver_name(self.driver_truck_dict, driver_name)
        if not truck_name:
            return

        # truck_name
        out_data1[variables.string_sum_truck] = truck_name
        lines = util.combine_path_read(self.model, variables.pre_path__product_value_stored,
                                       truck_name, 'pv')
        # original product value
        data_org_product_value = self.translator.stored_2_xls(lines)
        xls_data.append(data_org_product_value)

        # total of product value
        total_computer_input = self.translator.stored_2_handler(lines)
        product_value_dict = {}
        product_value_dict = self.computer.product_value(driver_name, total_computer_input)
        data_product_value_money = []
        util.xls_generate_line(data_product_value_money, '',
                               variables.personal, variables.cooperative)
        util.xls_generate_line(data_product_value_money, variables.string_product_value,
                               product_value_dict[variables.personal],
                               product_value_dict[variables.cooperative])
        util.xls_generate_line(data_product_value_money, variables.string_coe,
                               variables.single_commission, variables.double_commission)
        util.xls_generate_line(data_product_value_money, variables.string_value,
                               self.computer.money_product_value_single(product_value_dict[variables.personal]),
                               self.computer.money_product_value_double(product_value_dict[variables.cooperative]))
        product_value_total = self.computer.money_product_value(product_value_dict)
        # total product value: including single and double
        out_data1[variables.string_sum_product_value] = product_value_total
        out_data1[variables.string_sum_tie] = self.computer.money_tie_in_product_value(product_value_total)
        out_data1[variables.string_sum_commission] = self.computer.money_salary_in_product_value(product_value_total)
        util.xls_generate_line(data_product_value_money, variables.string_total, product_value_total)
        xls_data.append(data_product_value_money)

        data_miles = []
        util.xls_generate_line(data_miles, '',
                               variables.light_truck,
                               variables.first_level_heavy_truck,
                               variables.second_level_heavy_truck,
                               variables.third_level_heavy_truck)
        miles_dict = self.computer.miles(driver_name, total_computer_input)
        util.xls_generate_line(data_miles, variables.string_miles_single,
                               miles_dict[variables.personal][variables.light_truck],
                               miles_dict[variables.personal][variables.first_level_heavy_truck],
                               miles_dict[variables.personal][variables.second_level_heavy_truck],
                               miles_dict[variables.personal][variables.third_level_heavy_truck])
        util.xls_generate_line(data_miles, variables.string_miles_double,
                               miles_dict[variables.cooperative][variables.light_truck],
                               miles_dict[variables.cooperative][variables.first_level_heavy_truck],
                               miles_dict[variables.cooperative][variables.second_level_heavy_truck],
                               miles_dict[variables.cooperative][variables.third_level_heavy_truck])
        miles_light = self.computer.miles_level_total(miles_dict, variables.light_truck)
        miles_first_level = self.computer.miles_level_total(miles_dict, variables.first_level_heavy_truck)
        miles_second_level = self.computer.miles_level_total(miles_dict, variables.second_level_heavy_truck)
        miles_third_level = self.computer.miles_level_total(miles_dict, variables.third_level_heavy_truck)
        util.xls_generate_line(data_miles, variables.string_total,
                               miles_light, miles_first_level,
                               miles_second_level, miles_third_level)
        util.xls_generate_line(data_miles, variables.string_oil_subsidy_per_mile,
                               variables.oil_per_mile_by_weight_coe[variables.light_truck],
                               variables.oil_per_mile_by_weight_coe[variables.first_level_heavy_truck],
                               variables.oil_per_mile_by_weight_coe[variables.second_level_heavy_truck],
                               variables.oil_per_mile_by_weight_coe[variables.third_level_heavy_truck])
        util.xls_generate_line(data_miles, variables.string_oil_subsidy,
                               self.computer.oil_n_miles(miles_light, variables.oil_per_mile_by_weight_coe[variables.light_truck]),
                               self.computer.oil_n_miles(miles_first_level, variables.oil_per_mile_by_weight_coe[variables.first_level_heavy_truck]),
                               self.computer.oil_n_miles(miles_second_level, variables.oil_per_mile_by_weight_coe[variables.second_level_heavy_truck]),
                               self.computer.oil_n_miles(miles_third_level, variables.oil_per_mile_by_weight_coe[variables.third_level_heavy_truck]))
        oil_subsidy = self.computer.oil_subsidy_total(miles_dict)
        util.xls_generate_line(data_miles, variables.string_total, oil_subsidy)
        xls_data.append(data_miles)

        data_oil = []
        oil_dict = self.computer.oil(driver_name, total_computer_input)
        util.xls_generate_line(data_oil, variables.string_oil_own_single, oil_dict[variables.personal])
        util.xls_generate_line(data_oil, variables.string_oil_own_double, oil_dict[variables.cooperative])
        oil_own = self.computer.oil_total_own(oil_dict)
        util.xls_generate_line(data_oil, variables.string_total, oil_own)
        oil_saved = self.computer.oil_saved(oil_own, oil_subsidy)
        # oil saved
        out_data1[variables.string_sum_remaining_oil] = oil_saved
        util.xls_generate_line(data_oil, variables.string_oil_saved, oil_saved)
        util.xls_generate_line(data_oil, variables.string_money_per_oil_liter, variables.money_per_liter)
        money_oil_saved = self.computer.money_oil_saved(oil_saved)
        # money_oil_saved
        out_data1[variables.string_sum_money_oil] = money_oil_saved
        util.xls_generate_line(data_oil, variables.string_money_oil_saved, money_oil_saved)
        xls_data.append(data_oil)

        # other fee
        path = util.join_path(variables.pre_path_other_fee,
                              variables.file_name_other_fee,
                              variables.postfix_other_fee)
        lines = self.model.read(path)
        assert (lines)
        other_fee_handler_data = self.other_fee_translator.stored_2_handler(lines)
        other_fee_record = util.other_fee_record_by_name(other_fee_handler_data, driver_name)
        if other_fee_record:
            data_other_fee = []
            util.xls_generate_line(data_other_fee, variables.other_fee_days_off,
                                   float(other_fee_record[variables.other_fee_days_off]))
            util.xls_generate_line(data_other_fee, variables.string_other_fee_deduction_per_day,
                                   variables.money_per_dayoff)
            money_deduction_days_off = self.other_fee_computer.deduction_days_off(
                float(other_fee_record[variables.other_fee_days_off]))
            util.xls_generate_line(data_other_fee, variables.string_other_fee_days_off_deduction,
                                   money_deduction_days_off)
            util.xls_generate_line(data_other_fee, variables.string_other_fee_actual_phone_fee,
                                   float(other_fee_record[variables.other_fee_phone_fee]))
            phone_fee_deduction = self.other_fee_computer.deduction_phone_fee(
                float(other_fee_record[variables.other_fee_phone_fee]))
            util.xls_generate_line(data_other_fee, variables.string_other_fee_phone_fee_deduction,
                                   phone_fee_deduction)
            money_phone_fee_ss = self.other_fee_computer.phone_fee_days_off_deduction(
                phone_fee_deduction, money_deduction_days_off)
            # phone fee and ss
            out_data1[variables.string_sum_tel_ss] = money_phone_fee_ss
            deduction_fee_string = str(other_fee_record[variables.other_fee_deduction])
            deduction_fee_float = 0.0
            if deduction_fee_string == '':
                deduction_fee_float = float(0)
            else:
                deduction_fee_float = float(deduction_fee_string)
            out_data1[variables.string_sum_deduction] = deduction_fee_float
            util.xls_generate_line(data_other_fee, variables.string_other_fee_deduction, deduction_fee_float)
            deduction_total = self.other_fee_computer.deduction_total(money_phone_fee_ss, deduction_fee_float)
            util.xls_generate_line(data_other_fee, variables.string_total, deduction_total)
            out_data1[variables.string_sum_deduction_reason] = other_fee_record[variables.other_fee_comment]
            util.xls_generate_line(data_other_fee, variables.string_other_fee_comment,
                                   other_fee_record[variables.other_fee_comment])
            xls_data.append(data_other_fee)

            out_data1[variables.string_sum_total] = self.computer.actual_salary(
                product_value_total, money_oil_saved,
                out_data1[variables.string_sum_tel_ss],
                out_data1[variables.string_sum_deduction])
            out_data.append(out_data1)
            path = util.join_path(variables.pre_path_personal_details_xsl, driver_name, 'xls')
            self.xsl_model.multi_array_write(path, xls_data, variables.string_personal_detail)
    return out_data