def _test_groupcheckin(self):
    '''Round-trip test for FileGroupCheckin: write a 12-frame dummy
    sequence to disk, check it in under the "frames" column, then remove
    the test files.'''
    # FIXME: can't find maya_render.####.iff -- test disabled until the
    # path resolution issue is fixed
    return

    file_path = "./maya_render.####.iff"
    # renamed from "range"/"file" to avoid shadowing the builtins
    file_range = FileRange(1, 12)

    # create new test files, one per frame in the range
    expanded_paths = FileGroup.expand_paths(file_path, file_range)
    for expanded_path in expanded_paths:
        # context manager guarantees the handle is closed even on error
        with open(expanded_path, 'w') as f:
            f.write("file: %s" % expanded_path)

    # checkin the frames
    file_types = ["main"]
    file_paths = [file_path]
    checkin = FileGroupCheckin(self.person, file_paths, file_types,
                               file_range, column="frames")
    checkin.execute()

    # delete the test files
    for expanded_path in expanded_paths:
        os.unlink(expanded_path)
def get_file_info(xml, file_objects, sobject, snapshot, show_versionless=False, is_list=False): info = {} #TODO: {'file_type': [file_type]: [path], 'base_type': [base_type]: [file|directory|sequence]} if is_list: info = [] else: repo_info = {} info['_repo'] = repo_info nodes = xml.get_nodes("snapshot/file") for node in nodes: type = Xml.get_attribute(node, "type") file_code = Xml.get_attribute(node, "file_code") file_object = file_objects.get(file_code) if not file_object: if isinstance(info, dict): info[type] = ThumbWdg.get_no_image() else: info.append((type, ThumbWdg.get_no_image())) Environment.add_warning( "No file object", "No file object found for file code '%s'" % file_code) continue file_name = file_object.get_full_file_name() web_dir = sobject.get_web_dir(snapshot, file_object=file_object) # handle a range if it exists file_range = file_object.get_value("file_range") if file_range: from pyasm.biz import FileGroup, FileRange file_range = FileRange.get(file_range) file_names = FileGroup.expand_paths(file_name, file_range) # just check the first frame if file_names: file_name = file_names[0] path = "%s/%s" % (web_dir, file_name) if isinstance(info, dict): info[type] = path lib_dir = sobject.get_lib_dir(snapshot, file_object=file_object) repo_info[type] = "%s/%s" % (lib_dir, file_name) else: info.append((type, path)) return info
def _check_in_png(my, snapshot, sobject, render, pat, final_pat, render_dir, filenames):
    '''Rename rendered png frames matching pat to their final names,
    create icons for them, and verify the sequence against the render
    context's frame range.

    pat is a compiled regex and final_pat its replacement pattern.  When
    the path check fails, the frame range end is clipped to the number of
    consecutive frames actually found on disk.
    '''
    icon_path = final_path = ''
    icon_creator = None
    file_range = my.render_context.get_frame_range()
    for name in filenames:
        if pat.match(name):
            final_name = pat.sub(final_pat, name)
            # move the raw render to its final name in the same directory
            shutil.move(render_dir + "/" + name, render_dir + "/" + final_name)
            final_path = "%s/%s" % (render_dir, final_name)
            icon_creator = IconCreator(final_path)
            icon_creator.create_icons()
    # NOTE(review): only the icon path of the last matching frame is kept
    if icon_creator:
        icon_path = icon_creator.get_icon_path()
    # generalize the 4-digit frame number back to the "####" padding token
    gen_icon_path = re.sub("(.*_icon\.)(\d{4})(\.png)", r"\1####\3", icon_path)
    gen_final_path = re.sub("(.*\.)(\d{4})(\.png)", r"\1####\3", final_path)
    types = ["main", "icon"]
    paths = [gen_final_path, gen_icon_path]
    # check that all the files exist
    try:
        for path in paths:
            FileGroup.check_paths(path, file_range)
    except FileException, e:
        print("The frame range of layer [%s] probably\
 does not match the shot's frame range [%s]. %s" \
            %(sobject.get_value('name'), file_range.get_key(), e))
        # FIXME: this is a little redundant, but it works
        # count the consecutive frames that actually exist on disk
        count = 0
        file_paths = FileGroup.expand_paths(paths[0], file_range)
        for file_path in file_paths:
            if not os.path.exists(file_path):
                break
            count += 1
        # NOTE(review): this assumes the range starts at frame 1 --
        # frame_end is set to the raw count, not frame_start + count - 1;
        # confirm against FileRange semantics
        file_range.frame_end = count
def get_dir_info(dir, skip_dir_details=False, file_range=None):
    '''Finds the disk size of a path.

    Returns a dict with keys:
        size      - total size in bytes
        count     - number of files counted
        file_type - one of 'sequence', 'missing', 'link', 'directory', 'file'

    A path containing "###" is treated as a frame sequence and sized by
    expanding file_range (when supplied).  Walking a directory can be
    slow, so it may be skipped with skip_dir_details.
    '''
    info = {}
    count = 0
    dir_size = 0
    if dir.find("###") != -1:
        # sequence path: total the sizes of the expanded frame files
        file_type = 'sequence'
        if file_range:
            from pyasm.biz import FileGroup
            file_paths = FileGroup.expand_paths(dir, file_range)
            for file_path in file_paths:
                # gets total size of sequence
                dir_size += os.path.getsize(file_path)
    elif not os.path.exists(dir):
        file_type = 'missing'
    elif os.path.islink(dir):
        # size of the link itself; the target is not followed
        dir_size = os.path.getsize(dir)
        file_type = 'link'
    elif os.path.isdir(dir):
        file_type = 'directory'
        # this part is too slow on large trees, so it can be skipped
        if not skip_dir_details:
            # NOTE: unicode() is Python 2 only, kept for codebase parity
            for (path, dirs, files) in os.walk(unicode(dir)):
                for fname in files:
                    filename = os.path.join(path, fname)
                    if os.path.islink(filename):
                        # ignore links
                        continue
                    try:
                        dir_size += os.path.getsize(filename)
                    except OSError:
                        # file vanished or is unreadable: skip it
                        # (was a bare except that hid real errors)
                        continue
                    count += 1
    else:
        dir_size = os.path.getsize(dir)
        count = 1
        file_type = 'file'
    info['size'] = dir_size
    info['count'] = count
    info['file_type'] = file_type
    return info
def get_file_info(xml, file_objects, sobject, snapshot, show_versionless=False, is_list=False, protocol='http'): info = {} #TODO: {'file_type': [file_type]: [path], 'base_type': [base_type]: [file|directory|sequence]} if is_list: info = [] else: repo_info = {} info['_repo'] = repo_info nodes = xml.get_nodes("snapshot/file") for node in nodes: type = Xml.get_attribute(node, "type") file_code = Xml.get_attribute(node, "file_code") file_object = file_objects.get(file_code) if not file_object: if isinstance(info, dict): info[type] = ThumbWdg.get_no_image() else: info.append((type, ThumbWdg.get_no_image())) Environment.add_warning("No file object", "No file object found for file code '%s'" % file_code) continue file_name = file_object.get_full_file_name() web_dir = sobject.get_web_dir(snapshot, file_object=file_object) # handle a range if it exists file_range = file_object.get_value("file_range") if file_range: from pyasm.biz import FileGroup, FileRange file_range = FileRange.get(file_range) file_names = FileGroup.expand_paths(file_name, file_range) # just check the first frame if file_names: file_name = file_names[0] path = "%s/%s" % (web_dir, file_name) if protocol != "file": path = urllib.pathname2url(path) if isinstance(info, dict): info[type] = path lib_dir = sobject.get_lib_dir(snapshot, file_object=file_object) repo_info[type] = "%s/%s" % (lib_dir, file_name) else: info.append((type, path)) return info
def get_info(my, dirname, basename):
    '''Return disk info for dirname/basename.  The (potentially slow)
    directory lookup is only performed when the "location" kwarg is
    "server"; otherwise my.info is simply reset to an empty dict.'''
    full_path = "%s/%s" % (dirname, basename)

    # a frame range is only meaningful for a sequence path that also has
    # a snapshot associated with it
    frame_range = None
    snap = my.snapshots.get(full_path)
    if FileGroup.is_sequence(full_path) and snap:
        frame_range = snap.get_file_range()

    location = my.kwargs.get("location")
    my.info = Common.get_dir_info(full_path, file_range=frame_range) \
        if location == 'server' else {}
    return my.info
def get_info(self, dirname, basename):
    '''Populate and return self.info for the file at dirname/basename.
    Directory details are gathered only for the "server" location.'''
    path = "/".join([dirname, basename])

    # only a sequence path backed by a snapshot carries a frame range
    snapshot = self.snapshots.get(path)
    file_range = snapshot.get_file_range() \
        if (FileGroup.is_sequence(path) and snapshot) else None

    if self.kwargs.get("location") != 'server':
        self.info = {}
    else:
        self.info = Common.get_dir_info(path, file_range=file_range)
    return self.info
def get_display(my):
    '''Build the render-viewing widget for the sobject identified by the
    "search_key" form value: a render table, the render snapshot list,
    a strip of clickable frame thumbnails, and the render history.'''
    web = WebContainer.get_web()
    search_key = web.get_form_value("search_key")
    widget = Widget()
    sobject = Search.get_by_search_key(search_key)
    table = TableWdg( sobject.get_search_type(), "render" )
    table.set_sobject(sobject)
    widget.add(table)

    # get all of the snapshots with a context render
    sobject_snapshot = Snapshot.get_latest_by_sobject(sobject,"render")
    if sobject_snapshot:
        search_type = sobject.get_search_type()
        search_id = sobject.get_value('search_id')
        render_snapshots = Snapshot.get_by_search_type(search_type,search_id,"render")
        table = TableWdg("sthpw/snapshot")
        table.set_sobjects(render_snapshots)
        widget.add(table)

    widget.add(HtmlElement.h3("Rendered Frames"))
    if sobject_snapshot:
        widget.add("Render version: v%0.3d" % sobject_snapshot.get_value("version") )

    # get latest snapshot of the render
    renders = Render.get_all_by_sobject(sobject)
    if not renders:
        widget.add("<h4>No renders found</h4>")
        return widget

    render = renders[0]
    snapshot = Snapshot.get_latest_by_sobject(render,"render")
    if snapshot == None:
        widget.add("<h4>No snapshots found</h4>")
        return widget

    # get the images
    web_dir = snapshot.get_web_dir()
    lib_dir = snapshot.get_lib_dir()
    xml = snapshot.get_xml_value("snapshot")
    file_nodes = xml.get_nodes("snapshot/file")
    file_name = icon_file_name = ''
    frame_range = icon_frame_range = None
    # pick up the main frames and their icons from the snapshot xml
    for file_node in file_nodes:
        if Xml.get_attribute(file_node, 'type') == 'main':
            file_name, frame_range = my._get_frame_info(file_node, sobject)
        if Xml.get_attribute(file_node, 'type') == 'icon':
            icon_file_name, icon_frame_range = my._get_frame_info(file_node, sobject)

    # "##" marks a padded frame sequence which must be expanded
    file_names = [file_name]
    icon_file_names = [icon_file_name]
    if "##" in file_name:
        file_names = FileGroup.expand_paths(file_name, frame_range)
    if "##" in icon_file_name:
        icon_file_names = FileGroup.expand_paths(icon_file_name, \
            icon_frame_range)

    div = DivWdg()
    for k in range(len(file_names)):
        file_name = file_names[k]

        # ignore frames that don't exist
        lib_path = "%s/%s" % (lib_dir, file_name)
        if not os.path.exists(lib_path):
            continue

        # fall back to the frame itself when there is no matching icon
        try:
            icon_file_name = icon_file_names[k]
        except IndexError:
            icon_file_name = file_names[k]

        file_path = "%s/%s" % (web_dir, file_name)
        icon_file_path = "%s/%s" % (web_dir, icon_file_name)
        img = HtmlElement.img(icon_file_path)
        img.set_attr("width", "60")
        img.add_event("onmouseover","hint_bubble.show(event,'Ctrl + Click to open in new window')")
        href = HtmlElement.href(img, file_path)
        div.add(href)
    widget.add(div)

    widget.add( "<h3>Render History</h3>" )
    widget.add( my.get_render_history(renders) )
    return widget
def get_display(my):
    '''Display the backplate frames of the sobject identified by the
    "search_key" form value as a numbered grid of thumbnails, four per
    row.'''
    web = WebContainer.get_web()
    search_key = web.get_form_value("search_key")
    widget = Widget()
    sobject = Search.get_by_search_key(search_key)

    # get all of the snapshots with a context render
    sobject_snapshot = Snapshot.get_latest_by_sobject(sobject,"backplate")
    widget.add(HtmlElement.h3("BackPlate Frames"))
    if not sobject_snapshot:
        widget.add("No backplates checked in")
        return widget

    # get the images
    web_dir = sobject_snapshot.get_web_dir()
    xml = sobject_snapshot.get_xml_value("snapshot")
    file_nodes = xml.get_nodes("snapshot/file")
    file_name = icon_file_name = ''
    frame_range = icon_frame_range = None
    # pick up the main frames and their icons from the snapshot xml
    for file_node in file_nodes:
        if Xml.get_attribute(file_node, 'type') == 'main':
            file_name, frame_range = my._get_frame_info(file_node, sobject)
        if Xml.get_attribute(file_node, 'type') == 'icon':
            icon_file_name, icon_frame_range = my._get_frame_info(file_node, sobject)

    # "##" marks a padded frame sequence which must be expanded
    file_names = [file_name]
    icon_file_names = [icon_file_name]
    if "##" in file_name:
        file_names = FileGroup.expand_paths(file_name, frame_range)
    if "##" in icon_file_name:
        icon_file_names = FileGroup.expand_paths(icon_file_name, \
            icon_frame_range)

    div = DivWdg()
    for k in range(len(file_names)):
        file_name = file_names[k]

        # fall back to the frame itself when there is no matching icon
        try:
            icon_file_name = icon_file_names[k]
        except IndexError:
            icon_file_name = file_names[k]

        file_path = "%s/%s" % (web_dir, file_name)
        icon_file_path = "%s/%s" % (web_dir, icon_file_name)
        img = HtmlElement.img(icon_file_path)
        img.add_event("onmouseover","hint_bubble.show(event,'Ctrl + Click to open in new window')")
        href = HtmlElement.href(img, file_path)
        div.add("%0.4d" % (k+1))
        div.add(href)
        # wrap to a new row after every fourth thumbnail
        if k != 0 and (k+1) % 4 == 0:
            div.add("<br/>")
        div.add("<br/>")
    widget.add(div)
    return widget
def execute(my):
    '''Ingest the uploaded files named in kwargs["filenames"]: find or
    create one sobject per file (or per sequence in "sequence" update
    mode), optionally file it into a date-based category folder derived
    from its EXIF date, then check the file in through the TACTIC server
    stub, logging progress under kwargs["key"].'''
    filenames = my.kwargs.get("filenames")

    upload_dir = Environment.get_upload_dir()
    base_dir = upload_dir

    update_mode = my.kwargs.get("update_mode")

    search_type = my.kwargs.get("search_type")
    key = my.kwargs.get("key")
    relative_dir = my.kwargs.get("relative_dir")
    if not relative_dir:
        # default location: <project>/<table>
        project_code = Project.get_project_code()
        search_type_obj = SearchType.get(search_type)
        table = search_type_obj.get_table()
        relative_dir = "%s/%s" % (project_code, table)

    server = TacticServerStub.get()

    parent_key = my.kwargs.get("parent_key")
    category = my.kwargs.get("category")
    keywords = my.kwargs.get("keywords")
    update_data = my.kwargs.get("update_data")
    extra_data = my.kwargs.get("extra_data")
    if extra_data:
        extra_data = jsonloads(extra_data)
    else:
        extra_data = {}

    # TODO: use this to generate a category
    category_script_path = my.kwargs.get("category_script_path")
    """
    ie:
        from pyasm.checkin import ExifMetadataParser
        parser = ExifMetadataParser(path=file_path)
        tags = parser.get_metadata()
        date = tags.get("EXIF DateTimeOriginal")
        return date.split(" ")[0]
    """

    if not SearchType.column_exists(search_type, "name"):
        raise TacticException( 'The Ingestion puts the file name into the name column which is the minimal requirement. Please first create a "name" column for this sType.' )

    input_prefix = update_data.get('input_prefix')

    non_seq_filenames = []

    # For sequence mode, take all filenames, and regenerate the filenames based on the function "find_sequences"
    if update_mode == "sequence":
        non_seq_filenames_dict, seq_digit_length = my.find_sequences( filenames)
        # non_seq_filenames is a list of filenames that are stored in the None key,
        # which are the filenames that are not part of a sequence, or does not contain
        # a sequence pattern.
        non_seq_filenames = non_seq_filenames_dict[None]
        # delete the None key from list so filenames can be used in the latter for loop
        del non_seq_filenames_dict[None]
        filenames = non_seq_filenames_dict.keys()
        if filenames == []:
            raise TacticException( 'No sequences are found in files. Please follow the pattern of [filename] + [digits] + [file extension (optional)]. Examples: [abc_1001.png, abc_1002.png] [abc.1001.mp3, abc.1002.mp3] [abc_100_1001.png, abc_100_1002.png]' )

    for count, filename in enumerate(filenames):
        # Check if files should be updated.
        # If so, attempt to find one to update.
        # If more than one is found, do not update.
        if update_mode in ["true", "True"]:
            # first see if this sobjects still exists
            search = Search(search_type)
            search.add_filter("name", filename)
            if relative_dir and search.column_exists("relative_dir"):
                search.add_filter("relative_dir", relative_dir)
            sobjects = search.get_sobjects()
            if len(sobjects) > 1:
                sobject = None
            elif len(sobjects) == 1:
                sobject = sobjects[0]
            else:
                sobject = None
        elif update_mode == "sequence":
            if not FileGroup.is_sequence(filename):
                raise TacticException( 'Please modify sequence naming to have at least three digits.' )
            search = Search(search_type)
            search.add_filter("name", filename)
            if relative_dir and search.column_exists("relative_dir"):
                search.add_filter("relative_dir", relative_dir)
            sobjects = search.get_sobjects()
            if sobjects:
                sobject = sobjects[0]
            else:
                sobject = None
        else:
            sobject = None

        # Create a new file
        if not sobject:
            sobject = SearchType.create(search_type)
            sobject.set_value("name", filename)
            if relative_dir and sobject.column_exists("relative_dir"):
                sobject.set_value("relative_dir", relative_dir)

        # extract metadata
        #file_path = "%s/%s" % (base_dir, File.get_filesystem_name(filename))
        if update_mode == "sequence":
            # for sequences, operate on the first file of the group
            first_filename = non_seq_filenames_dict.get(filename)[0]
            last_filename = non_seq_filenames_dict.get(filename)[-1]
            file_path = "%s/%s" % (base_dir, first_filename)
        else:
            file_path = "%s/%s" % (base_dir, filename)

        # TEST: convert on upload
        try:
            convert = my.kwargs.get("convert")
            if convert:
                message_key = "IngestConvert001"
                cmd = ConvertCbk(**convert)
                cmd.execute()
        except Exception, e:
            print "WARNING: ", e

        if not os.path.exists(file_path):
            raise Exception("Path [%s] does not exist" % file_path)

        # get the metadata from this image
        if SearchType.column_exists(search_type, "relative_dir"):
            if category and category not in ['none', None]:
                from pyasm.checkin import ExifMetadataParser
                parser = ExifMetadataParser(path=file_path)
                tags = parser.get_metadata()

                date = tags.get("EXIF DateTimeOriginal")
                if not date:
                    date_str = "No-Date"
                else:
                    date_str = str(date)
                    # this can't be parsed correctly by dateutils
                    parts = date_str.split(" ")
                    date_str = parts[0].replace(":", "-")
                    date_str = "%s %s" % (date_str, parts[1])

                    from dateutil import parser
                    orig_date = parser.parse(date_str)
                    if category == "by_day":
                        date_str = orig_date.strftime("%Y/%Y-%m-%d")
                    elif category == "by_month":
                        date_str = orig_date.strftime("%Y-%m")
                    elif category == "by_week":
                        date_str = orig_date.strftime("%Y/Week-%U")

                full_relative_dir = "%s/%s" % (relative_dir, date_str)
                sobject.set_value("relative_dir", full_relative_dir)

        if parent_key:
            parent = Search.get_by_search_key(parent_key)
            if parent:
                # NOTE(review): "parent" is looked up but unused here;
                # set_sobject_value(sobject) passes the sobject itself --
                # possibly should be set_sobject_value(parent); confirm
                sobject.set_sobject_value(sobject)

        for key, value in update_data.items():
            if input_prefix:
                key = key.replace('%s|' % input_prefix, '')
            if SearchType.column_exists(search_type, key):
                if value:
                    sobject.set_value(key, value)
        """
        if SearchType.column_exists(search_type, "keywords"):
            if keywords:
                sobject.set_value("keywords", keywords)
        """
        for key, value in extra_data.items():
            if SearchType.column_exists(search_type, key):
                sobject.set_value(key, value)
        """
        if category:
            if SearchType.column_exists(search_type, "category"):
                sobject.set_value("category", category)
            if SearchType.column_exists(search_type, "relative_dir"):
                full_relative_dir = "%s/%s" % (relative_dir, category)
                sobject.set_value("relative_dir", category)
        """

        sobject.commit()
        search_key = sobject.get_search_key()

        # use API to check in file
        process = my.kwargs.get("process")
        if not process:
            process = "publish"
        if process == "icon":
            context = "icon"
        else:
            context = "%s/%s" % (process, filename.lower())

        if update_mode == "sequence":
            # derive the numeric frame range from the first/last filenames
            pattern_expr = re.compile('^.*(\d{%d})\..*$' % seq_digit_length)
            m_first = re.match(pattern_expr, first_filename)
            m_last = re.match(pattern_expr, last_filename)
            # for files without extension
            # abc_1001, abc.1123_1001
            if not m_first:
                no_ext_expr = re.compile('^.*(\d{%d})$' % seq_digit_length)
                m_first = re.match(no_ext_expr, first_filename)
                m_last = re.match(no_ext_expr, last_filename)
            # using second last index , to grab the set right before file type
            groups_first = m_first.groups()
            if groups_first:
                range_start = int(m_first.groups()[0])
            groups_last = m_last.groups()
            if groups_last:
                range_end = int(m_last.groups()[0])
            file_range = '%s-%s' % (range_start, range_end)
            file_path = "%s/%s" % (base_dir, filename)
            server.group_checkin(search_key, context, file_path, file_range, mode='uploaded')
        else:
            server.simple_checkin(search_key, context, filename, mode='uploaded')

        percent = int((float(count) + 1) / len(filenames) * 100)
        print "checking in: ", filename, percent
        msg = {
            'progress': percent,
            'description': 'Checking in file [%s]' % filename,
        }
        server.log_message(key, msg, status="in progress")
def _handle_files(self, snapshot, widget, upstream, recursive=True):
    '''Render the file entries of a snapshot into the widget: an explore
    button per file, either the expanded sequence paths (when the file
    carries a file_range) or a dependency thumbnail, any <ref> sub
    references (recursed via _handle_ref_node), and finally any unknown
    refs.'''
    web_dir = snapshot.get_web_dir()
    xml = snapshot.get_xml_value("snapshot")

    # handle files
    files = xml.get_nodes("snapshot/file")
    for file in files:
        file_code = Xml.get_attribute(file, "file_code")
        file_type = Xml.get_attribute(file, "type")
        file_range = Xml.get_attribute(file, "file_range")
        #file_range = "1-4/1"

        dir = snapshot.get_client_lib_dir(file_type=file_type)
        lib_dir = snapshot.get_lib_dir(file_type=file_type)
        open_button = IconButtonWdg( "Explore: %s" % dir, IconWdg.LOAD, False)
        # when the client lib dir equals the server lib dir we are on the
        # server itself, so browsing is refused
        if dir == lib_dir:
            open_button.add_behavior({'type':'click_up',
            'cbjs_action': '''var applet = spt.Applet.get(); spt.alert('You are not allowed to browse directories on a web server.'); '''})
        else:
            open_button.add_behavior({'type':'click_up',
            'dir' : dir,
            'cbjs_action': ''' var applet = spt.Applet.get(); var dir = bvr.dir; applet.open_explorer(dir);'''})
        open_button.add_class('small')
        open_button.add_style('float: left')
        widget.add(open_button)

        if file_range:
            file_name = Xml.get_attribute(file, "name")
            widget.add("%s [code = %s, type = %s]" % (file_name, file_code, file_type))
            widget.add(HtmlElement.br(2))
            # display all of the paths
            file_names = FileGroup.expand_paths( file_name, FileRange.get(file_range) )
            for file_name in file_names:
                #link = HtmlElement.href(file_name, "%s/%s" % (web_dir, file_name), target="_blank" )
                link = SpanWdg(file_name)
                link.add_color("color", "color")
                widget.add(link)
                widget.add(HtmlElement.br())
        else:
            thumb = DependencyThumbWdg()
            thumb.set_show_filename(True)
            thumb.set_sobject(snapshot)
            thumb.set_icon_size(15)
            thumb.set_image_link_order([file_type])
            thumb.set_option('detail', 'false')
            widget.add(SpanWdg(thumb, css='small'))
            widget.add("[code = %s, type = %s]" % ( file_code, file_type))
        widget.add(HtmlElement.br())

        block = DivWdg()
        block.add_style("margin-left: 30px")
        block.add_style("margin-top: 10px")
        nodes = xml.get_nodes("snapshot/file[@file_code='%s']/ref" % file_code)
        widget.add(HtmlElement.br(clear="all"))
        # handle sub refs
        for node in nodes:
            self._handle_ref_node(node, block, upstream, recursive)
            block.add(HtmlElement.br())
        if nodes:
            widget.add(block)
        widget.add(HtmlElement.br())

    files = xml.get_nodes("snapshot/unknown_ref")
    if files:
        widget.add(HtmlElement.b("Unknown ref."))
        for file in files:
            block = DivWdg()
            block.add_style("margin-left: 30px")
            block.add_style("margin-top: 10px")
            block.add( IconWdg( "Unknown", IconWdg.UNKNOWN) )
            path = Xml.get_attribute(file, "path")
            block.add(path)
            widget.add(block)
def get_display(my):
    '''Build the render-viewing widget for the sobject identified by the
    "search_key" form value: a render table, the render snapshot list,
    a strip of clickable frame thumbnails, and the render history.'''
    web = WebContainer.get_web()
    search_key = web.get_form_value("search_key")
    widget = Widget()
    sobject = Search.get_by_search_key(search_key)
    table = TableWdg(sobject.get_search_type(), "render")
    table.set_sobject(sobject)
    widget.add(table)

    # get all of the snapshots with a context render
    sobject_snapshot = Snapshot.get_latest_by_sobject(sobject, "render")
    if sobject_snapshot:
        search_type = sobject.get_search_type()
        search_id = sobject.get_value('search_id')
        render_snapshots = Snapshot.get_by_search_type(
            search_type, search_id, "render")
        table = TableWdg("sthpw/snapshot")
        table.set_sobjects(render_snapshots)
        widget.add(table)

    widget.add(HtmlElement.h3("Rendered Frames"))
    if sobject_snapshot:
        widget.add("Render version: v%0.3d" %
                   sobject_snapshot.get_value("version"))

    # get latest snapshot of the render
    renders = Render.get_all_by_sobject(sobject)
    if not renders:
        widget.add("<h4>No renders found</h4>")
        return widget

    render = renders[0]
    snapshot = Snapshot.get_latest_by_sobject(render, "render")
    if snapshot == None:
        widget.add("<h4>No snapshots found</h4>")
        return widget

    # get the images
    web_dir = snapshot.get_web_dir()
    lib_dir = snapshot.get_lib_dir()
    xml = snapshot.get_xml_value("snapshot")
    file_nodes = xml.get_nodes("snapshot/file")
    file_name = icon_file_name = ''
    frame_range = icon_frame_range = None
    # pick up the main frames and their icons from the snapshot xml
    for file_node in file_nodes:
        if Xml.get_attribute(file_node, 'type') == 'main':
            file_name, frame_range = my._get_frame_info(file_node, sobject)
        if Xml.get_attribute(file_node, 'type') == 'icon':
            icon_file_name, icon_frame_range = my._get_frame_info(
                file_node, sobject)

    # "##" marks a padded frame sequence which must be expanded
    file_names = [file_name]
    icon_file_names = [icon_file_name]
    if "##" in file_name:
        file_names = FileGroup.expand_paths(file_name, frame_range)
    if "##" in icon_file_name:
        icon_file_names = FileGroup.expand_paths(icon_file_name, \
            icon_frame_range)

    div = DivWdg()
    for k in range(len(file_names)):
        file_name = file_names[k]

        # ignore frames that don't exist
        lib_path = "%s/%s" % (lib_dir, file_name)
        if not os.path.exists(lib_path):
            continue

        # fall back to the frame itself when there is no matching icon
        try:
            icon_file_name = icon_file_names[k]
        except IndexError:
            icon_file_name = file_names[k]

        file_path = "%s/%s" % (web_dir, file_name)
        icon_file_path = "%s/%s" % (web_dir, icon_file_name)
        img = HtmlElement.img(icon_file_path)
        img.set_attr("width", "60")
        img.add_event(
            "onmouseover",
            "hint_bubble.show(event,'Ctrl + Click to open in new window')")
        href = HtmlElement.href(img, file_path)
        div.add(href)
    widget.add(div)

    widget.add("<h3>Render History</h3>")
    widget.add(my.get_render_history(renders))
    return widget
def get_display(my):
    '''Display the backplate frames of the sobject identified by the
    "search_key" form value as a numbered grid of thumbnails, four per
    row.'''
    web = WebContainer.get_web()
    search_key = web.get_form_value("search_key")
    widget = Widget()
    sobject = Search.get_by_search_key(search_key)

    # get all of the snapshots with a context render
    sobject_snapshot = Snapshot.get_latest_by_sobject(sobject, "backplate")
    widget.add(HtmlElement.h3("BackPlate Frames"))
    if not sobject_snapshot:
        widget.add("No backplates checked in")
        return widget

    # get the images
    web_dir = sobject_snapshot.get_web_dir()
    xml = sobject_snapshot.get_xml_value("snapshot")
    file_nodes = xml.get_nodes("snapshot/file")
    file_name = icon_file_name = ''
    frame_range = icon_frame_range = None
    # pick up the main frames and their icons from the snapshot xml
    for file_node in file_nodes:
        if Xml.get_attribute(file_node, 'type') == 'main':
            file_name, frame_range = my._get_frame_info(file_node, sobject)
        if Xml.get_attribute(file_node, 'type') == 'icon':
            icon_file_name, icon_frame_range = my._get_frame_info(
                file_node, sobject)

    # "##" marks a padded frame sequence which must be expanded
    file_names = [file_name]
    icon_file_names = [icon_file_name]
    if "##" in file_name:
        file_names = FileGroup.expand_paths(file_name, frame_range)
    if "##" in icon_file_name:
        icon_file_names = FileGroup.expand_paths(icon_file_name, \
            icon_frame_range)

    div = DivWdg()
    for k in range(len(file_names)):
        file_name = file_names[k]

        # fall back to the frame itself when there is no matching icon
        try:
            icon_file_name = icon_file_names[k]
        except IndexError:
            icon_file_name = file_names[k]

        file_path = "%s/%s" % (web_dir, file_name)
        icon_file_path = "%s/%s" % (web_dir, icon_file_name)
        img = HtmlElement.img(icon_file_path)
        img.add_event(
            "onmouseover",
            "hint_bubble.show(event,'Ctrl + Click to open in new window')")
        href = HtmlElement.href(img, file_path)
        div.add("%0.4d" % (k + 1))
        div.add(href)
        # wrap to a new row after every fourth thumbnail
        if k != 0 and (k + 1) % 4 == 0:
            div.add("<br/>")
        div.add("<br/>")
    widget.add(div)
    return widget
def execute(my):
    '''Ingest the uploaded files named in kwargs["filenames"]: find or
    create one sobject per file (or per sequence in "sequence" update
    mode), optionally file it into a date-based category folder derived
    from its EXIF date, then check the file in through the TACTIC server
    stub, logging progress under kwargs["key"].'''
    filenames = my.kwargs.get("filenames")

    upload_dir = Environment.get_upload_dir()
    base_dir = upload_dir

    update_mode = my.kwargs.get("update_mode")

    search_type = my.kwargs.get("search_type")
    key = my.kwargs.get("key")
    relative_dir = my.kwargs.get("relative_dir")
    if not relative_dir:
        # default location: <project>/<table>
        project_code = Project.get_project_code()
        search_type_obj = SearchType.get(search_type)
        table = search_type_obj.get_table()
        relative_dir = "%s/%s" % (project_code, table)

    server = TacticServerStub.get()

    parent_key = my.kwargs.get("parent_key")
    category = my.kwargs.get("category")
    keywords = my.kwargs.get("keywords")
    update_data = my.kwargs.get("update_data")
    extra_data = my.kwargs.get("extra_data")
    if extra_data:
        extra_data = jsonloads(extra_data)
    else:
        extra_data = {}

    # TODO: use this to generate a category
    category_script_path = my.kwargs.get("category_script_path")
    """
    ie:
        from pyasm.checkin import ExifMetadataParser
        parser = ExifMetadataParser(path=file_path)
        tags = parser.get_metadata()
        date = tags.get("EXIF DateTimeOriginal")
        return date.split(" ")[0]
    """

    if not SearchType.column_exists(search_type, "name"):
        raise TacticException('The Ingestion puts the file name into the name column which is the minimal requirement. Please first create a "name" column for this sType.')

    input_prefix = update_data.get('input_prefix')

    non_seq_filenames = []

    # For sequence mode, take all filenames, and regenerate the filenames based on the function "find_sequences"
    if update_mode == "sequence":
        non_seq_filenames_dict, seq_digit_length = my.find_sequences(filenames)
        # non_seq_filenames is a list of filenames that are stored in the None key,
        # which are the filenames that are not part of a sequence, or does not contain
        # a sequence pattern.
        non_seq_filenames = non_seq_filenames_dict[None]
        # delete the None key from list so filenames can be used in the latter for loop
        del non_seq_filenames_dict[None]
        filenames = non_seq_filenames_dict.keys()
        if filenames == []:
            raise TacticException('No sequences are found in files. Please follow the pattern of [filename] + [digits] + [file extension (optional)]. Examples: [abc_1001.png, abc_1002.png] [abc.1001.mp3, abc.1002.mp3] [abc_100_1001.png, abc_100_1002.png]')

    for count, filename in enumerate(filenames):
        # Check if files should be updated.
        # If so, attempt to find one to update.
        # If more than one is found, do not update.
        if update_mode in ["true", "True"]:
            # first see if this sobjects still exists
            search = Search(search_type)
            search.add_filter("name", filename)
            if relative_dir and search.column_exists("relative_dir"):
                search.add_filter("relative_dir", relative_dir)
            sobjects = search.get_sobjects()
            if len(sobjects) > 1:
                sobject = None
            elif len(sobjects) == 1:
                sobject = sobjects[0]
            else:
                sobject = None
        elif update_mode == "sequence":
            if not FileGroup.is_sequence(filename):
                raise TacticException('Please modify sequence naming to have at least three digits.')
            search = Search(search_type)
            search.add_filter("name", filename)
            if relative_dir and search.column_exists("relative_dir"):
                search.add_filter("relative_dir", relative_dir)
            sobjects = search.get_sobjects()
            if sobjects:
                sobject = sobjects[0]
            else:
                sobject = None
        else:
            sobject = None

        # Create a new file
        if not sobject:
            sobject = SearchType.create(search_type)
            sobject.set_value("name", filename)
            if relative_dir and sobject.column_exists("relative_dir"):
                sobject.set_value("relative_dir", relative_dir)

        # extract metadata
        #file_path = "%s/%s" % (base_dir, File.get_filesystem_name(filename))
        if update_mode == "sequence":
            # for sequences, operate on the first file of the group
            first_filename = non_seq_filenames_dict.get(filename)[0]
            last_filename = non_seq_filenames_dict.get(filename)[-1]
            file_path = "%s/%s" % (base_dir, first_filename)
        else:
            file_path = "%s/%s" % (base_dir, filename)

        # TEST: convert on upload
        try:
            convert = my.kwargs.get("convert")
            if convert:
                message_key = "IngestConvert001"
                cmd = ConvertCbk(**convert)
                cmd.execute()
        except Exception, e:
            print "WARNING: ", e

        if not os.path.exists(file_path):
            raise Exception("Path [%s] does not exist" % file_path)

        # get the metadata from this image
        if SearchType.column_exists(search_type, "relative_dir"):
            if category and category not in ['none', None]:
                from pyasm.checkin import ExifMetadataParser
                parser = ExifMetadataParser(path=file_path)
                tags = parser.get_metadata()

                date = tags.get("EXIF DateTimeOriginal")
                if not date:
                    date_str = "No-Date"
                else:
                    date_str = str(date)
                    # this can't be parsed correctly by dateutils
                    parts = date_str.split(" ")
                    date_str = parts[0].replace(":", "-")
                    date_str = "%s %s" % (date_str, parts[1])

                    from dateutil import parser
                    orig_date = parser.parse(date_str)
                    if category == "by_day":
                        date_str = orig_date.strftime("%Y/%Y-%m-%d")
                    elif category == "by_month":
                        date_str = orig_date.strftime("%Y-%m")
                    elif category == "by_week":
                        date_str = orig_date.strftime("%Y/Week-%U")

                full_relative_dir = "%s/%s" % (relative_dir, date_str)
                sobject.set_value("relative_dir", full_relative_dir)

        if parent_key:
            parent = Search.get_by_search_key(parent_key)
            if parent:
                # NOTE(review): "parent" is looked up but unused here;
                # set_sobject_value(sobject) passes the sobject itself --
                # possibly should be set_sobject_value(parent); confirm
                sobject.set_sobject_value(sobject)

        for key, value in update_data.items():
            if input_prefix:
                key = key.replace('%s|'%input_prefix, '')
            if SearchType.column_exists(search_type, key):
                if value:
                    sobject.set_value(key, value)
        """
        if SearchType.column_exists(search_type, "keywords"):
            if keywords:
                sobject.set_value("keywords", keywords)
        """
        for key, value in extra_data.items():
            if SearchType.column_exists(search_type, key):
                sobject.set_value(key, value)
        """
        if category:
            if SearchType.column_exists(search_type, "category"):
                sobject.set_value("category", category)
            if SearchType.column_exists(search_type, "relative_dir"):
                full_relative_dir = "%s/%s" % (relative_dir, category)
                sobject.set_value("relative_dir", category)
        """

        sobject.commit()
        search_key = sobject.get_search_key()

        # use API to check in file
        process = my.kwargs.get("process")
        if not process:
            process = "publish"
        if process == "icon":
            context = "icon"
        else:
            context = "%s/%s" % (process, filename.lower())

        if update_mode == "sequence":
            # derive the numeric frame range from the first/last filenames
            pattern_expr = re.compile('^.*(\d{%d})\..*$'%seq_digit_length)
            m_first = re.match(pattern_expr, first_filename)
            m_last = re.match(pattern_expr, last_filename)
            # for files without extension
            # abc_1001, abc.1123_1001
            if not m_first:
                no_ext_expr = re.compile('^.*(\d{%d})$'%seq_digit_length)
                m_first = re.match(no_ext_expr, first_filename)
                m_last = re.match(no_ext_expr, last_filename)
            # using second last index , to grab the set right before file type
            groups_first = m_first.groups()
            if groups_first:
                range_start = int(m_first.groups()[0])
            groups_last = m_last.groups()
            if groups_last:
                range_end = int(m_last.groups()[0])
            file_range = '%s-%s' % (range_start, range_end)
            file_path = "%s/%s" % (base_dir, filename)
            server.group_checkin(search_key, context, file_path, file_range, mode='uploaded')
        else:
            server.simple_checkin(search_key, context, filename, mode='uploaded')

        percent = int((float(count)+1) / len(filenames)*100)
        print "checking in: ", filename, percent
        msg = {
            'progress': percent,
            'description': 'Checking in file [%s]' % filename,
        }
        server.log_message(key, msg, status="in progress")