Example #1
    def __str__(self):
        string = '%s=' % url_encode(self.get_name())

        if self._value == '':
            string = '%sdeleted; expires=Thu, 01-Jan-1970 00:00:00 GMT' % string
        else:
            string = '%s%s' % (string, url_encode(self._value))

            if self._expire != 0:
                # %T is not portable across platforms; %H:%M:%S is equivalent
                expires = time.strftime("%a, %d-%b-%Y %H:%M:%S GMT",
                                        time.gmtime(self._expire))
                string = '%s; expires=%s' % (string, expires)

        if self._path:
            string = '%s; path=%s' % (string, self._path)

        if self._domain:
            string = '%s; domain=%s' % (string, self._domain)

        if self._secure:
            string = '%s; secure' % string

        if self._http_only:
            string = '%s; httponly' % string

        return string
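
Example #1 serializes cookie attributes into a Set-Cookie header value. As a point of reference, here is a minimal standalone sketch of the name/value and expires parts (Python 3; it assumes `url_encode` is `urllib.parse.quote`, which the excerpt itself does not confirm):

import time
from urllib.parse import quote as url_encode  # assumption: url_encode == quote

name, value, expire = "session", "two words", 1735689600
string = '%s=%s' % (url_encode(name), url_encode(value))
string += '; expires=%s' % time.strftime("%a, %d-%b-%Y %H:%M:%S GMT",
                                         time.gmtime(expire))
print(string)
# session=two%20words; expires=Wed, 01-Jan-2025 00:00:00 GMT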
Example #2
File: web.py  Project: FND/irc_log_browser
def search(environ, start_response):
    params = environ.get('QUERY_STRING', '')

    start_response("200 OK", [("Content-Type", "text/html; charset=UTF-8")])

    if not params:
        yield '<form method="GET">'
        yield '<input type="search" name="query">'
        yield '<input type="submit">'
        yield "</form>"
    else:
        query = parse_qs(params)["query"][0]

        # XXX: hard-codes directory hierarchy logic that doesn't belong here
        matches_by_channel = {}
        for channel in os.listdir(ROOT_PATH):
            for conversation in os.listdir(os.path.join(ROOT_PATH, channel)):
                filepath = os.path.join(ROOT_PATH, channel, conversation)
                matches = search_in_file(query, filepath)
                if matches:
                    matches_by_channel[channel] = matches_by_channel.get(channel, {}) # TODO: use defaultdict!?
                    matches_by_channel[channel][conversation] = matches

        if matches_by_channel:
            yield "<dl>"
            for channel in matches_by_channel:
                yield "<dt>%s</dt>" % channel
                for conversation in matches_by_channel[channel]:
                    for no, line in matches_by_channel[channel][conversation]:
                        uri = "/%s/%s#%s" % (url_encode(channel),
                                url_encode(conversation), no)
                        yield '<dd><a href="%s">#%s</a>: %s</dd>' % (uri, no, line)
            yield "</dl>"
Example #3
    def _fn_google_maps_directions_function(self, event, *args, **kwargs):
        """Function: A Function that takes an Origin and a Destination and returns a Google Maps Link with Directions"""

        log = logging.getLogger(__name__)

        # Base URL to Google Maps
        GOOGLE_MAPS_URL = "https://www.google.com/maps/dir/?api=1"

        def get_function_input(inputs, input_name, optional=False):
            """Given input_name, checks if it is defined. Raises ValueError if a mandatory input is None"""
            this_input = inputs.get(input_name)

            if this_input is None and optional is False:
                err = "'{0}' is a mandatory function input".format(input_name)
                raise ValueError(err)
            else:
                return this_input

        try:
            # Get the function inputs:
            inputs = {
                "google_maps_origin":
                get_function_input(kwargs,
                                   "google_maps_origin"),  # text (required)
                "google_maps_destination":
                get_function_input(
                    kwargs, "google_maps_destination"),  # text (required)
            }

            # Create payload dict with inputs
            payload = FunctionPayload(inputs)

            yield StatusMessage("Function Inputs OK")

            # url_encode origin and destination
            origin = url_encode(
                payload.inputs["google_maps_origin"].encode('utf8'))
            destination = url_encode(
                payload.inputs["google_maps_destination"].encode('utf8'))

            yield StatusMessage("Generating Link")

            # Generate Link
            payload.directions_link = "{0}&origin={1}&destination={2}".format(
                GOOGLE_MAPS_URL, origin, destination)

            # Send payload back to Appliance
            results = payload.as_dict()

            log.info("Complete")

            # Produce a FunctionResult with the results
            yield FunctionResult(results)
        except Exception:
            yield FunctionError()
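
Stripped of the Resilient plumbing (FunctionPayload, StatusMessage, FunctionResult), the link construction above reduces to percent-encoding the two inputs and splicing them into the base URL. A sketch, assuming url_encode is urllib.parse.quote (the origin and destination values here are made up):

from urllib.parse import quote as url_encode  # assumption

GOOGLE_MAPS_URL = "https://www.google.com/maps/dir/?api=1"
origin = url_encode("Cork, Ireland")
destination = url_encode("Galway, Ireland")
print("{0}&origin={1}&destination={2}".format(GOOGLE_MAPS_URL, origin, destination))
# https://www.google.com/maps/dir/?api=1&origin=Cork%2C%20Ireland&destination=Galway%2C%20Ireland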
Example #4
def root(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/html; charset=UTF-8")])
    yield "<ul>"
    for channel in determine_channels(ROOT_PATH):
        uri = "/%s" % url_encode(channel)
        yield '<li><a href="%s">%s</a></li>' % (uri, channel)
    yield "</ul>"
Example #5
File: greader.py  Project: rootart/grbackup
 def auth(self, email, pwd):
     """
     Authorize with the Google Reader service.
     Args:
         email - email address
         pwd - password
     """
     req_data = url_encode(
         {'Email': email,
          'Passwd': pwd,
          'service': 'reader',
          'accountType': 'GOOGLE'})
     req = urllib2.Request(AUTH_URL, data=req_data)
     try:
         resp = urllib2.urlopen(req).read()
         token = re.search(r'Auth=(\S*)', resp).group(1)
         self._header = {
             'Authorization': 'GoogleLogin auth={token}'.format(
                 token=token)}
     except (urllib2.HTTPError, urllib2.URLError) as exc:
         logging.error("Login Failed: %s", exc)
     except AttributeError:
         logging.error("Token Not Found in the response.",
                       extra={'response': resp})
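     # NOTE: the next line runs even when the login request above failed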
     self.__auth = True
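
Here url_encode is applied to a dict, so in grbackup it presumably aliases urllib.urlencode (this is Python 2 code; Google's ClientLogin API and Reader itself have since been shut down). The equivalent form-encoding step on Python 3:

from urllib.parse import urlencode as url_encode  # assumption: url_encode == urlencode

req_data = url_encode({'Email': 'user@example.com',
                       'Passwd': 'secret',
                       'service': 'reader',
                       'accountType': 'GOOGLE'})
print(req_data)
# Email=user%40example.com&Passwd=secret&service=reader&accountType=GOOGLE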
Example #6
File: greader.py  Project: wistful/grbackup
    def get_items(self, url, count=20):
        """
        Return items from the stream at the given url.
        """
        logging.info('start fetching url %s ', url_unquote(url))
        req_param = {'r': 'n', 'n': count, 'client': 'scroll'}
        continuation = None
        while True:
            if continuation:
                req_param['c'] = continuation
            req_data = url_encode(req_param)
            feed_url = "{url}?{req_data}".format(url=url, req_data=req_data)
            req = urllib2.Request(feed_url, headers=self._header)
            try:
                resp = urllib2.urlopen(req).read()
            except (urllib2.HTTPError, urllib2.URLError) as exc:
                logging.error("Failed getting stream (%s) items: %s",
                              url_unquote(url), exc)
                break
            feed_posts = json.loads(resp)

            for post in feed_posts['items']:
                yield post

            continuation = feed_posts.get('continuation', None)
            if not continuation:
                logging.info('end fetching url %s ', url_unquote(url))
                break
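
get_items pages through the stream by re-issuing the request with the continuation token until the response stops returning one. The pagination pattern in isolation, with a hypothetical fetch_page(params) standing in for the urllib2 request/json.loads round-trip above and returning a dict like {'items': [...], 'continuation': '...'}:

def iter_items(fetch_page, count=20):
    # fetch_page is a hypothetical callable; see the lead-in above.
    params = {'r': 'n', 'n': count, 'client': 'scroll'}
    while True:
        page = fetch_page(params)
        for post in page['items']:
            yield post
        continuation = page.get('continuation')
        if not continuation:
            break  # no more pages
        params['c'] = continuation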
Example #7
File: greader.py  Project: wistful/grbackup
 def auth(self, email, pwd):
     """
     Authorize with the Google Reader service.
     Args:
         email - email address
         pwd - password
     """
     req_data = url_encode({
         'Email': email,
         'Passwd': pwd,
         'service': 'reader',
         'accountType': 'GOOGLE'
     })
     req = urllib2.Request(AUTH_URL, data=req_data)
     try:
         resp = urllib2.urlopen(req).read()
         token = re.search(r'Auth=(\S*)', resp).group(1)
         self._header = {
             'Authorization': 'GoogleLogin auth={token}'.format(token=token)
         }
     except (urllib2.HTTPError, urllib2.URLError) as exc:
         logging.error("Login Failed: %s", exc)
     except AttributeError:
         logging.error("Token Not Found in the response.",
                       extra={'response': resp})
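     # NOTE: the next line runs even when the login request above failed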
     self.__auth = True
Example #8
def drop(revision_url_pattern):
    # Output the data.
    #
    # The data structures are all linked up nicely to one another.  You
    # can get all the LogMessages, and each LogMessage contains all the
    # Contributors involved with that commit; likewise, each Contributor
    # points back to all the LogMessages it contributed to.
    #
    # However, the HTML output is pretty simple right now.  It doesn't take
    # full advantage of all that cross-linking.  For each contributor, we
    # just create a file listing all the revisions contributed to; and we
    # build a master index of all contributors, each name being a link to
    # that contributor's individual file.  Much more is possible... but
    # let's just get this up and running first.

    for key in LogMessage.all_logs.keys():
        # You could print out all log messages this way, if you wanted to.
        pass
        # print LogMessage.all_logs[key]

    detail_subdir = "detail"
    if not os.path.exists(detail_subdir):
        os.mkdir(detail_subdir)

    index = open('index.html', 'w')
    index.write(html_header('Contributors as of r%d' % LogMessage.max_revnum))
    index.write(index_introduction)
    index.write('<ol>\n')
    # The same contributor appears under multiple keys, so uniquify.
    seen_contributors = {}
    # Sorting alphabetically is acceptable, but even better would be to
    # sort by number of contributions, so the most active people appear at
    # the top -- that way we know whom to look at first for commit access
    # proposals.
    sorted_contributors = Contributor.all_contributors.values()
    sorted_contributors.sort()
    for c in sorted_contributors:
        if not seen_contributors.has_key(c):
            if c.score() > 0:
                if c.is_full_committer:
                    # Don't even bother to print out full committers.  They are
                    # a distraction from the purposes for which we're here.
                    continue
                else:
                    committerness = ''
                    if c.is_committer:
                        committerness = '&nbsp;(partial&nbsp;committer)'
                    urlpath = "%s/%s.html" % (detail_subdir,
                                              c.canonical_name())
                    fname = os.path.join(detail_subdir,
                                         "%s.html" % c.canonical_name())
                    index.write(
                        '<li><p><a href="%s">%s</a>&nbsp;[%s]%s</p></li>\n' %
                        (url_encode(urlpath), c.big_name(html=True),
                         c.score_str(), committerness))
                    c.html_out(revision_url_pattern, fname)
        seen_contributors[c] = True
    index.write('</ol>\n')
    index.write(html_footer())
    index.close()
Example #9
    def canonical_name(self):
        """Return a canonical name for this contributor.  The canonical
    name may or may not be based on the contributor's actual email
    address.

    The canonical name will not contain filename-unsafe characters.

    This method is guaranteed to return the same canonical name every
    time only if no further contributions are recorded from this
    contributor after the first call.  This is because a contribution
    may bring a new form of the contributor's name, one which affects
    the algorithm used to construct canonical names."""
        retval = None
        if self.username:
            retval = self.username
        elif self.email:
            # Take some rudimentary steps to shorten the email address, to
            # make it more manageable.  If this is ever discovered to result
            # in collisions, we can always just use the full address.
            try:
                at_posn = self.email.index('@')
                first_dot_after_at = self.email.index('.', at_posn)
                retval = self.email[0:first_dot_after_at]
            except ValueError:
                retval = self.email
        elif self.real_name:
            # Last resort: construct canonical name based on real name.
            retval = ''.join(self.real_name.lower().split(' '))
        if retval is None:
            complain('Unable to construct a canonical name for Contributor.',
                     True)
        return url_encode(retval, safe="!#$&'()+,;<=>@[]^`{}~")
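
The email branch keeps everything up to the first dot after the '@' (the local part plus the first host label); the trailing url_encode call then percent-encodes anything filename-unsafe, with an unusually permissive safe= set. The shortening step in isolation:

def shorten(email):
    # Keep the local part plus the first host label, as in the excerpt.
    try:
        at_posn = email.index('@')
        first_dot_after_at = email.index('.', at_posn)
        return email[0:first_dot_after_at]
    except ValueError:
        return email  # no dot after the '@'

print(shorten("jrandom@example.com"))  # jrandom@example
print(shorten("jrandom@localhost"))    # jrandom@localhost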
Example #10
def channel(environ, start_response):
    channel = environ.get("PATH_INFO", "").lstrip("/")
    dirpath = os.path.join(ROOT_PATH, channel) # TODO: encode special chars (e.g. slashes)

    try:
        entries = os.listdir(dirpath)
    except (OSError, IOError):
        start_response("404 Not Found", [])
        yield "Not Found"
    else:
        start_response("200 OK", [("Content-Type", "text/html; charset=UTF-8")])
        yield "<ul>"
        for entry in entries:
            uri = "/%s/%s" % (url_encode(channel), url_encode(entry))
            yield '<li><a href="%s">%s</a></li>' % (uri, entry)
        yield "</ul>"
Example #11
File: greader.py  Project: rootart/grbackup
    def get_items(self, url, count=20):
        """
        Return items from the stream at the given url.
        """
        logging.info('start fetching url %s ', url_unquote(url))
        req_param = {'r': 'n', 'n': count, 'client': 'scroll'}
        continuation = None
        while True:
            if continuation:
                req_param['c'] = continuation
            req_data = url_encode(req_param)
            feed_url = "{url}?{req_data}".format(url=url, req_data=req_data)
            req = urllib2.Request(feed_url, headers=self._header)
            try:
                resp = urllib2.urlopen(req).read()
            except (urllib2.HTTPError, urllib2.URLError) as exc:
                logging.error("Failed getting stream items: %s", exc)
                break
            feed_posts = json.loads(resp)

            for post in feed_posts['items']:
                yield post

            continuation = feed_posts.get('continuation', None)
            if not continuation:
                logging.info('end fetching url %s ', url_unquote(url))
                break
Example #12
 def login(self, username, password):
     connection = HttpConnection(self.flotrack_domain)
     connection.set_debuglevel(self.debug_level)
     connection.connect()
     # Configure login parameters (this is the content of the HTTP request)
     params = { "LoginForm[username]": username,
                "LoginForm[password]": password,
                "LoginForm[rememberMe]": 1, }
     encoded_params = url_encode(params)
     # Get the HTTP headers to use
     headers = self.get_default_headers(encoded_params)
     del headers["Cookie"]
     # Configure the cookie jar to store the login information
     cookie_request = HttpRequestAdapter(self.flotrack_domain, self.login_url,
                                         headers)
     self.cookie_jar.add_cookie_header(cookie_request)
     # Issue the HTTP request
     connection.request("POST", self.login_url, encoded_params, headers)
     response = connection.getresponse()
     if response.status == OK:
         return False
     if response.status == FOUND:
         response.read()
         response.info = lambda: response.msg
         # Extract the login cookies
         self.cookie_jar.extract_cookies(response, cookie_request)
         self.connection = connection
         return True
     raise Exception("Flotrack connection failed during login.")
Example #13
 def get_running_log_params(self, date, route, distance_miles, time_minutes, notes):
     minutes_component = floor(time_minutes)
     seconds_component = (time_minutes - minutes_component) * 60
     date_string = date.isoformat()
     run_info = [("date", date_string),
                 ("log_type", ""),
                 ("log_type", "run"),
                 ("crossTrainType", ""),
                 ("workout", ""),
                 ("workout", ""),
                 ("title", route),
                 ("show_on_menu", 0),
                 ("date", date_string),
                 ("warmup_mins", ""),
                 ("warmup_secs", ""),
                 ("warmup_distance", ""),
                 ("warmup_dist_unit", "miles"),
                 ("warmup_shoe_id", ""),
                 ("add_log", ""),
                 ("interval[0][reps]", ""),
                 ("interval[0][distance]", ""),
                 ("interval[0][dist_unit]", "miles"),
                 ("interval[0][rest]", ""),
                 ("interval[0][rest_unit]", "secs"),
                 ("interval[0][calculate_pace]", 1),
                 ("interval[0][shoe_id]", ""),
                 ("cooldown_mins", ""),
                 ("cooldown_secs", ""),
                 ("cooldown_distance", ""),
                 ("cooldown_dist_unit", "miles"),
                 ("cooldown_shoe_id", ""),
                 ("mins", int(minutes_component)),
                 ("secs", int(seconds_component)),
                 ("distance", distance_miles),
                 ("dist_unit", "miles"),
                 ("calculate_pace", 0),
                 ("calculate_pace", 1),
                 ("feel", ""),
                 ("field1", ""),
                 ("field2", ""),
                 ("shoe_id", ""),
                 ("notes", notes),
                 ("add_log", ""),]
     for i in range(len(run_info)):
         key = run_info[i][0]
         value = run_info[i][1]
         if key != "add_log" and "interval[0]" not in key:
             key = "RunningLogResource[%s]" % key
             run_info[i] = (key, value)
     return url_encode(run_info)
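
The closing loop rewrites plain keys into PHP-style RunningLogResource[...] fields, leaving add_log and the interval[0][...] keys untouched, and then form-encodes the whole list. A trimmed sketch (assuming url_encode is urllib.urlencode, which accepts a sequence of pairs and preserves their order):

from urllib.parse import urlencode as url_encode  # assumption

run_info = [("date", "2015-05-30"), ("mins", 42),
            ("interval[0][reps]", ""), ("add_log", "")]
for i in range(len(run_info)):
    key, value = run_info[i]
    if key != "add_log" and "interval[0]" not in key:
        run_info[i] = ("RunningLogResource[%s]" % key, value)
print(url_encode(run_info))
# RunningLogResource%5Bdate%5D=2015-05-30&RunningLogResource%5Bmins%5D=42&interval%5B0%5D%5Breps%5D=&add_log=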
Example #14
    def get_feed(self, feed):
        ''' Read feed with feedparser '''
        if feed.etag:
            parser = fp.parse(feed.url, etag=feed.etag)
            feed.title = parser.feed.title
            feed.etag = parser.etag
        elif feed.modified:
            parser = fp.parse(feed.url, modified=feed.modified)
            # if hasattr(parser.feed, 'title'):
            feed.title = parser.feed.title
            feed.modified = parser.modified
        else:
            parser = fp.parse(feed.url)
            feed.title = parser.feed.title
            if hasattr(
                    parser, 'etag'
            ):  # assumes we only need either the etag or the modified but not both
                feed.etag = parser.etag
            elif hasattr(parser, 'modified'):
                feed.modified = parser.modified

        articles = []
        for f in parser.entries:
            if not self.db_url_exists(f.link):
                article = r.get(DIFFBOT + url_encode(f.link)).json()
                # figure out date
                if 'date' in article and len(
                        article['date'].split()[0]
                ) == 3:  # the second condition checks for an abbreviated weekday name first
                    date = datetime.datetime.strptime(
                        article['date'], '%a, %d %b %Y %H:%M:%S %Z').strftime(
                            '%Y-%m-%d')  # may need capital %b
                else:
                    date = datetime.datetime.now().strftime("%Y-%m-%d")

                # figure out url
                if 'resolved_url' in article:
                    url = article['resolved_url']
                else:
                    url = article['url']
                article_tuple = (date, urlsplit(url)[1], article['title'],
                                 article['text'], url)
                articles.append(article_tuple)
        return articles
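
urlsplit(url)[1] in the article tuple is the network-location (host) component of the resolved URL:

from urllib.parse import urlsplit  # urlparse.urlsplit in the Python 2 original

url = "https://example.com/2015/06/post.html"
print(urlsplit(url)[1])      # example.com
print(urlsplit(url).netloc)  # the same field, by name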
Example #15
File: greader.py  Project: wistful/grbackup
 def subscriptions(self):
     """
     return list of subscriptions
     """
     if not self._subscriptions:
         req_data = url_encode({'output': 'json'})
         url = "{subscriptions_list_url}?{req_data}".format(
             subscriptions_list_url=SUBSCRIPTIONS_LIST_URL,
             req_data=req_data)
         req = urllib2.Request(url, headers=self._header)
         try:
             resp = urllib2.urlopen(req).read()
             self._subscriptions = json.loads(resp)['subscriptions']
         except (urllib2.HTTPError, urllib2.URLError) as exc:
             logging.error("Failed getting subscriptions: %s", exc)
         except KeyError:
             logging.error("Subscriptions not found in the response.",
                           extra={'response': resp})
     return self._subscriptions
Example #16
File: greader.py  Project: rootart/grbackup
 def subscriptions(self):
     """
     return list of subscriptions
     """
     if not self._subscriptions:
         req_data = url_encode({'output': 'json'})
         url = "{subscriptions_list_url}?{req_data}".format(
             subscriptions_list_url=SUBSCRIPTIONS_LIST_URL,
             req_data=req_data)
         req = urllib2.Request(url, headers=self._header)
         try:
             resp = urllib2.urlopen(req).read()
             self._subscriptions = json.loads(resp)['subscriptions']
         except (urllib2.HTTPError, urllib2.URLError) as exc:
             logging.error("Failed getting subscriptions: %s", exc)
         except KeyError:
             logging.error("Subscriptions not found in the response.",
                           extra={'response': resp})
     return self._subscriptions
Example #17
def generate_rules(app, support, confidence, backend_url, retry_times=3):
    url_params = url_encode({
        'applicationName': app,
        'minSupport': support,
        'minConfidence': confidence
    })
    url = "{}?{}".format(backend_url, url_params)
    request = urllib2.Request(url)

    data = None
    execution_time = None

    for i in xrange(retry_times):
        try:
            start = time.time()
            response = urllib2.urlopen(request)
            data = response.read()
            execution_time = time.time() - start
            break
        except urllib2.URLError:
            continue

    return data, execution_time
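
generate_rules retries the request up to retry_times times and falls through to (None, None) if every attempt fails. The same retry-and-time pattern on Python 3 (the endpoint and parameter names are whatever the caller supplies; nothing here is specific to the original backend):

import time
import urllib.error
import urllib.request
from urllib.parse import urlencode

def fetch_with_retry(url, params, retry_times=3):
    full_url = "{}?{}".format(url, urlencode(params))
    for _ in range(retry_times):
        try:
            start = time.time()
            data = urllib.request.urlopen(full_url).read()
            return data, time.time() - start
        except urllib.error.URLError:
            continue  # try again
    return None, None  # all attempts failed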
Example #18
	def get_feed(self, feed):
		''' Read feed with feedparser '''
		if feed.etag:
			parser = fp.parse(feed.url, etag=feed.etag)
			feed.title = parser.feed.title
			feed.etag = parser.etag
		elif feed.modified:
			parser = fp.parse(feed.url, modified=feed.modified)
			# if hasattr(parser.feed, 'title'):
			feed.title = parser.feed.title
			feed.modified = parser.modified
		else:
			parser = fp.parse(feed.url)
			feed.title = parser.feed.title
			if hasattr(parser, 'etag'): # assumes we only need either the etag or the modified but not both
				feed.etag = parser.etag
			elif hasattr(parser, 'modified'):
				feed.modified = parser.modified
		
		articles = []
		for f in parser.entries:
			if not self.db_url_exists(f.link):
				article = r.get(DIFFBOT+url_encode(f.link)).json()
				# figure out date
				if 'date' in article and len(article['date'].split()[0]) == 3: # the second condition checks for an abbreviated weekday name first
					date = datetime.datetime.strptime(article['date'], '%a, %d %b %Y %H:%M:%S %Z').strftime('%Y-%m-%d') # may need capital %b
				else:
					date = datetime.datetime.now().strftime("%Y-%m-%d")

				# figure out url
				if 'resolved_url' in article:
					url = article['resolved_url']
				else:
					url = article['url']
				article_tuple = (date, urlsplit(url)[1], article['title'], article['text'], url)
				articles.append(article_tuple)
		return articles
Example #19
    def _fn_ds_search_function(self, event, *args, **kwargs):
        """Function: Function to send a Query to the Digital Shadows Platform and returns a Python List of the results"""

        log = logging.getLogger(__name__)

        def get_config_option(option_name, optional=False):
            """Given option_name, checks if it is in appconfig. Raises ValueError if a mandatory option is missing"""
            option = self.options.get(option_name)

            if not option and optional is False:
                err = "'{0}' is mandatory and is not set in the app.config file. You must set this value to run this function".format(
                    option_name)
                raise ValueError(err)
            else:
                return option

        def get_function_input(inputs, input_name, optional=False):
            """Given input_name, checks if it defined. Raises ValueError if a mandatory input is None"""
            this_input = inputs.get(input_name)

            if this_input is None and optional is False:
                err = "'{0}' is a mandatory function input".format(input_name)
                raise ValueError(err)
            else:
                return this_input

        try:
            inputs = {
                "ds_search_value": get_function_input(kwargs,
                                                      "ds_search_value")
            }

            # Create payload dict with inputs
            payload = FunctionPayload(inputs)

            yield StatusMessage("Function Inputs OK")

            # Get configs
            api_key = get_config_option("ds_api_key")
            api_secret = get_config_option("ds_api_secret")
            base_url = get_config_option("ds_base_url")

            headers = {
                'content-type': 'application/json; charset=utf-8',
                'Accept': 'application/json'
            }
            basic_auth = HTTPBasicAuth(api_key, api_secret)

            qry_url = "{0}{1}".format(base_url, "/api/search/find")
            ds_filter = {"query": payload.inputs["ds_search_value"]}

            try:
                yield StatusMessage(
                    "Sending POST request to {0}".format(qry_url))
                res = requests.post(qry_url,
                                    json.dumps(ds_filter),
                                    auth=basic_auth,
                                    headers=headers,
                                    verify=True)

                res.raise_for_status()

                if res.status_code == 200:
                    payload.data = json.loads(res.text)["content"]

                    qry = url_encode(
                        payload.inputs["ds_search_value"].encode('utf8'))
                    link = "{0}/search?q={1}&view=List".format(base_url, qry)
                    payload.link = link
                    payload.href = """<a href={0}>Link</a>""".format(link)

                else:
                    payload.success = False
                    raise ValueError(
                        'Request to {0} failed with code {1}'.format(
                            qry_url, res.status_code))

            except requests.exceptions.Timeout:
                raise ValueError('Request to {0} timed out'.format(qry_url))

            except requests.exceptions.TooManyRedirects:
                raise ValueError('A bad url request', qry_url)

            except requests.exceptions.HTTPError as err:
                if err.response.content:
                    custom_error_content = json.loads(err.response.content)
                    raise ValueError(custom_error_content['error']['message'])
                else:
                    raise ValueError(err)
            except requests.exceptions.RequestException as e:
                raise ValueError(e)

            results = payload.as_dict()

            log.info("Complete")

            # Produce a FunctionResult with the results
            yield FunctionResult(results)
        except Exception:
            yield FunctionError()
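
On success the function also builds a UI deep link by percent-encoding the search value into a query string. Reduced to the encoding step (assuming url_encode is urllib.parse.quote; the base URL below is hypothetical):

from urllib.parse import quote as url_encode  # assumption

base_url = "https://portal.example.com"  # hypothetical
qry = url_encode("acme & co".encode('utf8'))
print("{0}/search?q={1}&view=List".format(base_url, qry))
# https://portal.example.com/search?q=acme%20%26%20co&view=List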
Example #20
 page = copy(test_page)
 print "Insert: "
 write_with_get("Skill/test.md", "update", page)
 print "Update Tags: "
 page["metadata"]["tags"] = [{
     'slug': 'FPGA',
     'view': 'FPGA'
 }, {
     'slug': 'tag_test',
     'view': 'tag_test'
 }]
 write_with_get("Skill/test.md", "update", page)
 print "Update Authors: "
 page["metadata"]["authors"] = [{
     "slug": "命月天宇",
     "view": url_encode("命月天宇")
 }]
 write_with_get("Skill/test.md", "update", page)
 print "Update Title: "
 page["metadata"]["title"] = {"slug": "Skill-test", "view": "title_test"}
 write_with_get("Skill/test.md", "update", page)
 print "Update Data adn Summary: "
 page["metadata"]["date"] = "2015.05.30 12:00"
 page["metadata"]["summary"] = "summary_test"
 write_with_get("Skill/test.md", "update", page)
 print "Delete: "
 write_with_get("Skill/test.md", "delete", page)
 print "Insert 1: "
 write_with_get("Skill/test.md", "update", test_page)
 print "Insert 2: "
 page = parser.parse("Art/test.md")
Example #21
        print_database(database)
        print "\n"

    parser = Parser()
    wrapper = Wrapper()
    test_page = parser.parse("Skill/test.md")
    test_page["metadata"] = wrapper.wrap(test_page["metadata"])
    writer = Writer(database)
    page = copy(test_page)
    print "Insert: "
    write_with_get("Skill/test.md", "update", page)
    print "Update Tags: "
    page["metadata"]["tags"] = [{'slug': 'FPGA', 'view': 'FPGA'}, {'slug': 'tag_test', 'view': 'tag_test'}]
    write_with_get("Skill/test.md", "update", page)
    print "Update Authors: "
    page["metadata"]["authors"] = [{"slug": "命月天宇", "view": url_encode("命月天宇")}]
    write_with_get("Skill/test.md", "update", page)
    print "Update Title: "
    page["metadata"]["title"] = {"slug": "Skill-test", "view": "title_test"}
    write_with_get("Skill/test.md", "update", page)
    print "Update Data adn Summary: "
    page["metadata"]["date"] = "2015.05.30 12:00"
    page["metadata"]["summary"] = "summary_test"
    write_with_get("Skill/test.md", "update", page)
    print "Delete: "
    write_with_get("Skill/test.md", "delete", page)
    print "Insert 1: "
    write_with_get("Skill/test.md", "update", test_page)
    print "Insert 2: "
    page = parser.parse("Art/test.md")
    page["metadata"] = wrapper.wrap(page["metadata"])