Example #1
def catch_all(path):
    if len(path.split("/")) >= 2:
        head = path.split("/")[0]
        if head in ["css", "js"]:
            path = lib.root + "/static/" + lib.safe_path(path)
            if os.path.exists(path):
                response = Response("", 200)
                response.set_data(lib.read_file(path))
                response.headers["Content-Type"] = "%s; charset=UTF-8" % lib.guess_mime(path)
                return response
        elif head == "post":
            path = lib.root + "/content/" + lib.safe_path(path[5:])
            if os.path.exists(path):
                response = Response("", 200)
                extension = path.split(".")[-1]
                if extension in ["md", "markdown"]:
                    post = lib.read_post(path)
                    response.set_data(render_template("post.html.jinja", data=post))
                    response.headers["Content-Type"] = "text/html; charset=UTF-8"
                elif extension in ["png", "jpg", "pdf", "mp3", "mp4"]:
                    response.set_data(lib.read_file(path))
                    response.headers["Content-Type"] = "%s; charset=UTF-8" % lib.guess_mime(path)
                else:
                    abort(404)
                return response
    abort(404)
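Response, render_template, and abort are Flask/Werkzeug names, so this reads as a Flask catch-all view. A minimal sketch of how such a view is presumably registered (the app object and route rule are assumptions, not shown in the source):

from flask import Flask, Response, render_template, abort
import os
import lib

app = Flask(__name__)

# Hypothetical wiring: Flask's path converter also matches slashes,
# so a single rule can dispatch css/js assets and /post/... pages.
@app.route('/<path:path>')
def catch_all(path):
    ...  # body as in the example above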
Example #2
    def read_index(self) -> List[IndexEntry]:
        """
        读取 .git/index 文件并返回 IndexEntry 对象列表
        """
        try:
            data = read_file(os.path.join(self.git_path, 'index'))
        except FileNotFoundError:
            return []
        # Verify the checksum to detect a corrupted or modified index file
        digest = hashlib.sha1(data[:-20]).digest()
        assert digest == data[-20:], 'invalid index checksum'

        # Parse the index header
        signature, version, num_entries = struct.unpack('!4sLL', data[:12])
        assert signature == b'DIRC', f"invalid signature {signature}"

        assert version == 2, f"unsupported version {version}"

        # Parse the entries stored in the index
        entry_data = data[12:-20]
        entries = []
        i = 0
        while i + 62 < len(entry_data):
            fields_end = i + 62
            fields = struct.unpack('!LLLLLLLLLL20sH', entry_data[i:fields_end])
            path_end = entry_data.index(b'\x00', fields_end)
            path = entry_data[fields_end:path_end]
            entry = IndexEntry(*(fields + (path.decode(), )))
            entries.append(entry)
            entry_len = ((62 + len(path) + 8) // 8) * 8
            i += entry_len
        assert len(entries) == num_entries
        return entries
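For reference, read_index unpacks twelve fixed fields plus a path per entry, so IndexEntry is presumably a simple record like this sketch (field names follow the Git index v2 on-disk layout; the exact names in this project are an assumption):

import collections

# Hypothetical definition; field order mirrors
# struct.unpack('!LLLLLLLLLL20sH', ...) plus the decoded path.
IndexEntry = collections.namedtuple('IndexEntry', [
    'ctime_s', 'ctime_n', 'mtime_s', 'mtime_n', 'dev', 'ino',
    'mode', 'uid', 'gid', 'size', 'sha1', 'flags', 'path',
])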
Example #3
def save_post_data_to_csv_by_profile(dir_name, file_name, posts):
    file_data = read_file(dir_name, file_name)
    if file_data[0]:
        print('Successfully opened the _Profile_Posts_Export.csv file')
    else:
        print('Created _Profile_Posts_Export.csv file')

    for post in islice(posts, ceil(p_counts)):
        new_row = {
            '_username': post.owner_username,
            '_media_id': post.mediaid,
            '_short_url': post.shortcode,
            '_date': post.date_local,
            '_date(GMT)': post.date_utc,
            '_caption': post.caption,
            '_comments_count': post.comments,
            '_likes_count': post.likes,
            '_video_views': post.video_view_count,
            '_video_url': post.video_url,
            '_thumbnail_url': post.url,
            '_image_url': post.url,
            # Guard against posts without a location
            '_location_id': post.location.id if post.location else None,
            '_location_name': post.location.name if post.location else None,
            '_location_url': post.location.slug if post.location else None,
            '_lat': post.location.lat if post.location else None,
            '_lng': post.location.lng if post.location else None
        }

        write_file(dir_name, file_name, new_row)
Example #4
    def prepare(self):
        # Initialization on first launch:
        # create the data/settings directory if it does not exist
        if not os.path.exists(self.dataDir):
            os.mkdir(self.dataDir)
        # Record the last modification time of the resource snapshot file
        if os.path.isfile(self.file_resource_list):
            modified_time = int(os.path.getmtime(self.file_resource_list))
            self.resource_list_final_modified_time = time.strftime(
                self.resource_list_final_modified_time_format,
                time.localtime(modified_time))
        '''else:
            lib.write_file(self.file_resource_list, '')
            #print(self.file_resource_list, 'created successfully')'''
        # Read the config file
        if os.path.isfile(self.file_config):
            config = lib.read_file(self.file_config)
            self.config = json.loads(config)  # load the config
        # Otherwise create the config file
        else:
            self.action_update_file_config()
        # Check that the web root is configured correctly;
        # this decides whether the remaining steps run
        check = self.checkConfig_webroot()
        if check['code'] > 0:
            #print(check['msg'])
            return False
        self.action_build_dirs_path()
Example #5
    def test_res(self):
        tf_raw_state = read_file("test_data/terraform.teststate")
        tf_state = json.loads(tf_raw_state)
        tf_meta = get_tf_metadata(tf_state)
        self.assertEqual(tf_meta["version"], 4)
        self.assertEqual(tf_meta["terraform_version"], "0.12.9")
        self.assertEqual(tf_meta["serial"], 360)
Example #6
    def get_local_master_hash(self):
        """
        Return the local master hash, or None if it does not exist.
        """
        try:
            master_path = self.get_master_path()
            return read_file(master_path).decode().strip()
        except FileNotFoundError:
            return None
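get_master_path is not shown; in the standard Git layout the master ref lives at .git/refs/heads/master, so a plausible sketch is:

    def get_master_path(self):
        # Hypothetical helper, assuming the standard Git ref layout.
        return os.path.join(self.git_path, 'refs', 'heads', 'master')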
Example #7
def save_new_posts_by_hashtag(hashtag):
    hashtag = hashtag.replace("#", "")
    posts = Hashtag.from_name(IL.context, hashtag).get_posts()
    posts_sorted_by_time = sorted(posts,
                                  key=lambda p: p.date_utc,
                                  reverse=True)

    file_data = read_file('csvs', 'Hash_Tag_Export.csv')

    if file_data[0]:
        print('Successfully opened the Hash_Tag_Export.csv file')
    else:
        print('Created Hash_Tag_Export.csv file')

    for post in islice(posts_sorted_by_time, ceil(p_counts)):
        new_row = {
            '_Hash_Tag': hashtag,
            '_media_id': post.mediaid,
            '_short_url': post.shortcode,
            '_date': post.date_local,
            '_date(GMT)': post.date_utc,
            '_caption': post.caption,
            '_comments_count': post.comments,
            '_likes_count': post.likes,
            '_video_views': post.video_view_count,
            '_video_url': post.video_url,
            '_thumbnail_url': post.url,
            '_image_url': post.url,
            # Guard against posts without a location
            '_location_id': post.location.id if post.location else None,
            '_location_name': post.location.name if post.location else None,
            '_location_url': post.location.slug if post.location else None,
            '_lat': post.location.lat if post.location else None,
            '_lng': post.location.lng if post.location else None,
            '_user_id': post.owner_profile.userid,
            '_username': post.owner_username,
            '_full_name': post.owner_profile.full_name,
            '_profile_pic_url': post.owner_profile.profile_pic_url,
            '_profile_url': post.owner_profile.external_url,
            '_Num_of_Followers': post.owner_profile.followers,
            '_Num_of_Posts': post.owner_profile.mediacount,
            '_Num_Following': post.owner_profile.followees,
            '_Profile_Text': post.owner_profile.biography
        }

        write_file('csvs', 'Hash_Tag_Export.csv', new_row)
Example #8
    def diff(self):
        changed, _, _ = self.get_status()
        entries_by_path = {e.path: e for e in self.index.read_index()}
        for i, path in enumerate(changed):
            sha1 = entries_by_path[path].sha1.hex()
            data = self.blob.decompress(sha1)
            index_lines = data.decode().splitlines()
            working_lines = read_file(path).decode().splitlines()
            diff_lines = difflib.unified_diff(index_lines,
                                              working_lines,
                                              '{} (index)'.format(path),
                                              '{} (working copy)'.format(path),
                                              lineterm='')
            for line in diff_lines:
                print(line)
            if i < len(changed) - 1:
                print('-' * 70)
Example #9
    def decompress(self, sha1: str) -> Tuple[str, bytes]:
        """
        解析 blob 数据
        """
        path = self.find_object(sha1)
        full_data = zlib.decompress(read_file(path))

        nul_index = full_data.index(b'\x00')
        header = full_data[:nul_index]

        obj_type, size_str = header.decode().split()
        size = int(size_str)
        data = full_data[nul_index + 1:]

        assert size == len(data), f"expected size {size}, got {len(data)} bytes"

        return obj_type, data
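The split on the NUL byte follows Git's loose-object layout: an ASCII type, a space, the decimal payload size, a NUL, then the payload, all zlib-compressed. A minimal round-trip sketch under that assumption:

import zlib

payload = b'hello\n'
stored = zlib.compress(b'blob %d\x00' % len(payload) + payload)
full_data = zlib.decompress(stored)
header, _, data = full_data.partition(b'\x00')
assert header == b'blob 6' and data == payload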
Example #10
    def test_request_get(self):
        # create project
        project_id = gen_test_project()
        tf_raw_state = read_file("test_data/terraform.teststate")
        write_key(f"{project_id}/terraform.tfstate", tf_raw_state)
        report = gen_report(project_id)
        print(report)
        # get state
        event = {
            "httpMethod": "GET",
            "pathParameters": {"projectId": project_id},
            "requestContext": {"domainName": "test.local"},
        }
        result = lambda_handler(event, {})
        print(result["body"])
        self.assertEqual(result["statusCode"], 200)
        self.assertTrue(result["body"].startswith("<!doctype"))
Example #11
    def decompress(self, sha1: str) -> bytes:
        """
        解析 blob 数据,校验数据类型和大小,防止读取被修改过的文件
        """
        assert self.TYPE, f"类型错误 {self.TYPE}"
        path = self.find_object(sha1)
        full_data = zlib.decompress(read_file(path))

        nul_index = full_data.index(b'\x00')
        header = full_data[:nul_index]

        obj_type, size_str = header.decode().split()
        size = int(size_str)
        data = full_data[nul_index + 1:]

        assert size == len(data), f"expected size {size}, got {len(data)} bytes"
        assert obj_type == self.TYPE, f"expected object type {self.TYPE}, got {obj_type}"

        return data
Example #12
    def __init__(self, git_path: str = ""):
        if git_path:
            self.git_path = git_path
        else:
            self.git_path = find_path()
        self.work_path = os.path.realpath(os.path.join(self.git_path, ".."))
        self.index = Index(self.git_path)
        self.blob = Blob(self.git_path)
        ignore_file_name = ".gitignore"
        ignore_path = os.path.join(self.work_path, ignore_file_name)
        self.ignore_pattern = set()
        if os.path.exists(ignore_path):
            self.ignore = set(read_file(ignore_path).decode().splitlines())
            for ignore_item in self.ignore:
                # " patten " (sic) marks a pattern entry; strip the 8-char marker
                if ignore_item.startswith(" patten "):
                    self.ignore_pattern.add(ignore_item[8:])
            self.ignore.add(ignore_file_name)
        else:
            # must be a set, since ignore_pattern is subtracted below
            self.ignore = set()
        self.ignore -= self.ignore_pattern
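Under this parsing, a .gitignore line is either a literal path or, when prefixed with the eight-character marker " patten " (sic, spelled that way in the code), a pattern. A hypothetical file containing:

build
 patten *.pyc

would put build (plus .gitignore itself) in self.ignore and *.pyc in self.ignore_pattern.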
Example #13
def save_unique_comments_by_user(pfn):
    file_data = read_file('csvs', 'Profile_Unique_Likes_n_Comments.csv')
    if file_data[0]:
        print(
            'Successfully opened the Profile_Unique_Likes_n_Comments.csv file')
    else:
        print('Created Profile_Unique_Likes_n_Comments.csv file')

    profile = Profile.from_username(IL.context, pfn)
    posts_sorted_by_time = sorted(profile.get_posts(),
                                  key=lambda p: p.date_utc,
                                  reverse=True)

    for post in islice(posts_sorted_by_time, ceil(p_counts)):
        comments = post.get_comments()
        for comment in comments:
            new_row = {
                '_Profile_Handle': pfn,
                '_user_id': comment.owner.userid,
                '_username': comment.owner.username,
                '_full_name': comment.owner.full_name,
                '_is_private': comment.owner.is_private,
                '_is_verified': comment.owner.is_verified,
                '_Date_of_Last_Like_or_Comment': '',
                '_Total_Comments_N_Likes': comment.likes_count,
                '_Total_Comments': '',
                '_Total_Likes': comment.likes_count,
                '_profile_pic_url': comment.owner.profile_pic_url,
                '_profile_url': comment.owner.external_url,
                '_Num_of_Followers': comment.owner.followers,
                '_Num_of_Posts': comment.owner.mediacount,
                '_Num_Following': comment.owner.followees,
                '_Profile_Text': comment.owner.biography
            }

            write_file('csvs', 'Profile_Unique_Likes_n_Comments.csv', new_row)
Example #14
    def test_request_post(self):
        # create project
        project_id = gen_test_project()

        # post state
        raw_tf = read_file("test_data/terraform.teststate")
        event = {
            "httpMethod": "POST",
            "body": raw_tf,
            "pathParameters": {
                "projectId": project_id
            },
            "requestContext": {
                "domainName": "test.local"
            },
        }
        result = lambda_handler(event, {})
        self.assertEqual(result["statusCode"], 200)
        self.assertTrue(result["body"].startswith("{"))

        s3_data = read_key_or_default(f"{project_id}/terraform.tfstate",
                                      "NONE")
        tf_meta = get_tf_metadata(s3_data, True)
        self.assertNotEqual(tf_meta["terraform_version"], "invalid")
Example #15
import logging
import os
import json
from jinja2 import Template
from lib import create_response, read_key_or_default, read_file, get_tf_res, get_tf_metadata

logger = logging.getLogger()
logger.setLevel(os.environ.get('LOG_LEVEL', 'INFO'))

INFO = read_file("templates/project_info.html")
INFO_TEMPLATE = Template(INFO)
DOMAIN = os.environ.get('DOMAIN')


def lambda_handler(event, context):
    project_id = event["pathParameters"]["projectId"]
    logger.info(f"Got request for project {project_id}")

    configfile = f"{project_id}/config.json"
    statefile = f"{project_id}/terraform.tfstate"

    config = json.loads(read_key_or_default(configfile))
    project_name = config["name"]
    logger.info(f"Got request for {project_name} with id {project_id}")

    self_url = "https://" + event["requestContext"]["domainName"]

    # Get existing state or create new
    if event['httpMethod'] == "GET":
        logger.info("Type is GET, send state")
        config = json.loads(read_key_or_default(configfile))
Example #16
    def include(self, safeloader, node):
        node = self.construct_scalar(node)
        filepath, sep, key = node.partition(':')
        self._data = yaml.load(read_file(filepath), Loader)
        return self.subkey(key)
Example #17
    def read(self, safeloader, node):
        node = self.construct_scalar(node)
        return read_file(node)
Example #18
import sys
import codecs
from ruamel import yaml
from lib import read_file


if __name__ == '__main__':
    if len(sys.argv) < 5:  # sys.argv[4] is used below
        print('ERROR: input params error ' + str(len(sys.argv)))
        exit(1)

    path = sys.argv[1]
    release_version = sys.argv[2]
    dependency_version = sys.argv[3]
    dependency_range = sys.argv[4].split('/')
    rc_or_final_map = dict()
    files = read_file(path, 'images.yaml')
    for item in files.items():
        is_change = False
        file_name = item[0]
        template = item[1]
        releases = item[1].get('releases')
        for release in releases:
            release_ver = release.get('release-version')
            if release_ver != release_version:
                continue
            for dependency in release.get('dependencies'):
                max_version = dependency.get('max-version')
                if max_version == dependency_version:
                    is_change = True
                    dependency['max-version'] = dependency_range[0]
                    dependency['min-version'] = dependency_range[1]
Example #19
def solution(in_file, out_file, solution_f):
    teams, pizzas = read_file(in_file)
    answer = solution_f(teams, pizzas)
    write_result(answer, score_answer(answer, pizzas), out_file)
Example #20
# Main program to implement the logic of costs calculation
# Please, read Spec.txt for details of logic, file format etc.

import lib

#file_path = '/home/tonyr/Work/Costs/Data/costs_20121207.csv'
file_path = '/home/nurton/Stat/Costs/'
db_name = "costs"
db_user = "******"
db_pwd = "root"

raw_list = []

db_conn = lib.get_db_connection(db_name, db_user, db_pwd)

# Read data from file and put it in list
raw_list = lib.read_file(file_path)

print(raw_list)

# Calculate total costs for certain day
day_total = lib.get_day_total(raw_list)

print(day_total)

# Add total day costs to the day's file
#lib.log_to_file(file_path, day_total)

db_conn.close()

Example #21
    def read(self, safeloader, node):
        node = self.construct_scalar(node)
        return read_file(node)
Example #22
    def include(self, safeloader, node):
        node = self.construct_scalar(node)
        filepath, sep, key = node.partition(':')
        self._data = yaml.load(read_file(filepath), Loader)
        return self.subkey(key)
Example #23
def train(model_path, corpus_path, state_size=3):
    corpus = lib.read_file(corpus_path)
    text_model = markovify.Text(corpus, state_size=state_size)
    model_json = text_model.to_json()
    lib.write_list_to_file(model_path, [model_json])
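A hedged usage sketch of the round trip, assuming lib.read_file returns the file's text (the paths are hypothetical):

train('model.txt', 'corpus.txt', state_size=2)
model = markovify.Text.from_json(lib.read_file('model.txt'))
print(model.make_sentence(tries=100))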
Example #24
    for i in range(N3):
        while sorted_pizzas[first_available][2]:
            first_available = first_available + 1
            if first_available >= len(pizzas):
                return answer
        next_set = find_next_set(3, sorted_pizzas, first_available)
        if next_set:
            answer.append(next_set)
            if i % 100 == 0:
                print("N3 ", i)

    for i in range(N2):
        while sorted_pizzas[first_available][2]:
            first_available = first_available + 1
            if first_available >= len(pizzas):
                return answer
        next_set = find_next_set(2, sorted_pizzas, first_available)
        if next_set:
            answer.append(next_set)
            if i % 100 == 0:
                print("N2 ", i)

    return answer


if __name__ == '__main__':
    teams, pizzas = read_file("b_little_bit_of_everything.in")
    ans = greedy_solution(teams, pizzas)
    print(ans)
Example #25
    def test_res_raw(self):
        tf_raw_state = read_file("test_data/terraform.teststate")
        tf_res = get_tf_res(tf_raw_state, True)
        self.assertEqual(len(tf_res), 1)
Example #26
    def test_res(self):
        tf_raw_state = read_file("test_data/terraform.teststate")
        tf_state = json.loads(tf_raw_state)
        tf_res = get_tf_res(tf_state)
        self.assertEqual(len(tf_res), 1)
        self.assertEqual(tf_res[0]["id"], "state_bucket")
Example #27
    def test_real(self):
        data = read_file("test_data/terraform.teststate")
        self.assertTrue(data.startswith("{"))
Example #28
files_to_process = []

db_conn = lib.get_db_connection(db_name, db_user, db_pwd)
cur = db_conn.cursor()

# Get list of unprocessed file names from DB
rows_affected = cur.execute(DB_QUERY_GET_LIST_OF_UNPROCESSED_FILES)
print("Number of unprocessed FILE_NAMEs in DB:", rows_affected)
if rows_affected == 0:
    print('There are no files in DB.')
else:
    db_result = cur.fetchall()  # returns a 2-dimensional array
    for nme in db_result:
        files_to_process.append(nme[0])
print("FILE_NAMEs from DB are:", files_to_process)


# Read data from file and put it in list
for file_name in files_to_process:
    raw_list = lib.read_file(file_path + file_name)

    print "raw_list:", raw_list

    lib.load_file_data_to_db(cur, raw_list, file_name)

# Calculate total costs for certain day
#day_total = lib.get_day_total(raw_list)

#print(day_total)
Example #29
from argparse import ArgumentParser

import markovify
import lib

if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('-m',
                        '--model',
                        dest='model',
                        type=str,
                        required=False,
                        help='Model path',
                        default='model.txt')

    args = parser.parse_args()

    print('*' * 50)
    for i in vars(args):
        print(str(i) + ' - ' + str(getattr(args, i)))

    print('*' * 50)

    model_data = lib.read_file(args.model)
    model = markovify.Text.from_json(model_data)
    for _ in range(5):
        while True:
            sentence = model.make_sentence(tries=100,
                                           max_overlap_ratio=0.45,
                                           max_words=20)
            if sentence:
                break
        print(sentence)
Example #30
import json
import logging
import os
import urllib.parse
from jinja2 import Template
from lib import create_response, randomString, write_key, read_file, redirect

logger = logging.getLogger()
logger.setLevel(os.environ.get('LOG_LEVEL', 'INFO'))

DOMAIN = os.environ.get('DOMAIN')
KEY = os.environ.get('KEY')

DEFAULT_STATE = read_file("templates/default.tfstate.template")
PROJECT_FORM = read_file("templates/project_form.html")


def lambda_handler(event, context):

    # Get existing state or create new
    if event['httpMethod'] == "GET":
        logger.info(f"Send form for creation")
        return create_response(PROJECT_FORM,contenttype="text/html")
        
    # update
    if event['httpMethod'] == "POST":
        body_vars = {}
        body = urllib.parse.unquote(event["body"])
        for line in body.split("&"):
            line_data = line.split("=")
            body_vars[line_data[0]] = line_data[1]
Example #31
    def _(self, path: str) -> str:
        assert self.TYPE, f"invalid type {self.TYPE}"
        data = read_file(path)
        return self.compress(data)