def test_link(self):
    """Check Org-mode link formatting with and without a description."""
    expected_with_desc = "[[/link/][description]]"
    self.assertEqual(expected_with_desc,
                     OrgFormat.link("/link/", "description"),
                     "format error link+description")
    expected_bare = "[[/link/]]"
    self.assertEqual(expected_bare,
                     OrgFormat.link("/link/"),
                     "format error link")
    # spaces in the target must be percent-encoded
    expected_quoted = "[[/link%20link/]]"
    self.assertEqual(expected_quoted,
                     OrgFormat.link("/link link/"),
                     "quote error")
def test_link(self):
    """Check Org-mode link formatting (description, bare link, quoting)."""
    cases = [
        (("/link/", "description"), "[[/link/][description]]",
         "format error link+description"),
        (("/link/",), "[[/link/]]", "format error link"),
        (("/link link/",), "[[/link%20link/]]", "quote error"),
    ]
    for args, expected, message in cases:
        self.assertEqual(expected, OrgFormat.link(*args), message)
def test_link(self):
    """Verify OrgFormat.link for plain URLs, URLs with spaces, and descriptions."""
    repo = "http://github.org/novoid/memacs"
    # bare URL
    self.assertEqual(OrgFormat.link(repo),
                     '[[http://github.org/novoid/memacs]]')
    # spaces are percent-encoded in the target
    self.assertEqual(OrgFormat.link(repo + " with space"),
                     '[[http://github.org/novoid/memacs%20with%20space]]')
    # optional description becomes the second bracket group
    self.assertEqual(OrgFormat.link(repo, "Memacs Repository"),
                     '[[http://github.org/novoid/memacs][Memacs Repository]]')
def test_link(self):
    """Verify OrgFormat.link for plain URLs, URLs with spaces, and descriptions.

    Note: the redundant Python-2-era ``u''`` prefixes were dropped —
    in Python 3 every string literal is already unicode, so the
    expected values are byte-identical.
    """
    # bare URL
    self.assertEqual(
        OrgFormat.link("http://github.org/novoid/memacs"),
        '[[http://github.org/novoid/memacs]]')
    # spaces are percent-encoded in the target
    self.assertEqual(
        OrgFormat.link("http://github.org/novoid/memacs with space"),
        '[[http://github.org/novoid/memacs%20with%20space]]')
    # optional description becomes the second bracket group
    self.assertEqual(
        OrgFormat.link("http://github.org/novoid/memacs",
                       "Memacs Repository"),
        '[[http://github.org/novoid/memacs][Memacs Repository]]')
def __handle_file(self, photo_file, filename):
    """
    Check whether *filename* is an image; if it carries an EXIF
    timestamp, write a linked entry for it to the org file.

    @param photo_file: file name used as the link description
    @param filename: full path of the file to inspect
    """
    logging.debug("handling file %s", filename)

    # only files recognized as images are considered
    # (fix: compare against None with `is`, not `!=`/`==`)
    if imghdr.what(filename) is not None:
        exif_datetime = get_exif_datetime(filename)
        if exif_datetime is None:
            logging.debug("skipping: %s has no EXIF information", filename)
        else:
            try:
                # fix: the original rebound the name `datetime` from a
                # str to a struct_time — use distinct names instead
                time_struct = time.strptime(exif_datetime,
                                            "%Y:%m:%d %H:%M:%S")
                timestamp = OrgFormat.datetime(time_struct)
                output = OrgFormat.link(filename, photo_file)
                # photo_file + timestamp serves as the hashing data
                properties = OrgProperties(photo_file + timestamp)
                self._writer.write_org_subitem(timestamp=timestamp,
                                               output=output,
                                               properties=properties)
            except ValueError as e:
                logging.warning("skipping: Could not parse "
                                "timestamp for %s : %s", filename, e)
def __write_file(self, file, link, timestamp):
    """
    Append one org entry for *file*; spaces in the file name are kept
    as-is (replacespaces=False).
    """
    output = OrgFormat.link(link="file:" + link,
                            description=file,
                            replacespaces=False)
    if self._args.omit_drawers:
        properties = None
    else:
        # more than one file may share the same timestamp, so the
        # rendered output is used as additional data for hashing
        properties = OrgProperties(data_for_hashing=output)
    self._writer.write_org_subitem(timestamp=timestamp,
                                   output=output,
                                   properties=properties)
def __get_item_data(self, item):
    """
    gets information out of <item>..</item>

    @param item: one feedparser entry (dict-like)
    @return: output, note, properties, tags, timestamp
             variables for orgwriter.append_org_subitem
    """
    try:
        properties = OrgProperties()
        guid = item['id']
        if not guid:
            logging.error("got no id")

        unformatted_link = item['link']
        short_link = OrgFormat.link(unformatted_link, "link")

        # if we found a url in the title, prepend the short link to
        # the subject instead of linking the whole title
        if re.search(r"http[s]?://", item['title']) is not None:
            output = short_link + ": " + item['title']
        else:
            output = OrgFormat.link(unformatted_link, item['title'])

        note = item['description']

        # converting updated_parsed UTC --> LOCALTIME
        # Karl 2018-09-22: feedparser temporarily maps
        # `updated_parsed` to `published_parsed` as a deprecated
        # fallback; this may need adjusting in a future version
        timestamp = OrgFormat.datetime(
            time.localtime(calendar.timegm(item['updated_parsed'])))
        properties.add("guid", guid)
    except KeyError:
        logging.error("input is not a RSS 2.0")
        sys.exit(1)

    tags = []

    # keys that were already handled above (see feedparser
    # deprecation note regarding updated_parsed/published_parsed)
    dont_parse = ['title', 'description', 'updated', 'summary',
                  'updated_parsed', 'link', 'links']

    for key in item:
        logging.debug(key)
        if key in dont_parse:
            continue
        value = item[key]
        # fix: the original tested `type(i) == str or type(i) == str`
        # (a Python-2 str/unicode leftover); a single isinstance()
        # is the correct Python 3 form
        if isinstance(key, str) and isinstance(value, str) and value != "":
            # fix: read the value BEFORE renaming the key so we never
            # look up item["guid"], which may not exist
            properties.add("guid" if key == "id" else key, value)
        elif key == "tags":
            for tag in value:
                logging.debug("found tag: %s", tag['term'])
                tags.append(tag['term'])

    return output, note, properties, tags, timestamp