Beispiel #1
0
 def list_file(self, name_tuple_prefix, max_files=None):
     """List stored files whose key starts with *name_tuple_prefix*.

     Args:
         name_tuple_prefix: name tuple converted to an OSS key prefix
             via ``self._get_key_prefix``.
         max_files: optional cap on the number of results; ``None``
             means no limit.

     Returns:
         A lazy iterator of name tuples (one per matching object).
     """
     path_prefix = self._get_key_prefix(name_tuple_prefix)
     # NOTE(review): removed a leftover debug loop that printed the first
     # object of the entire bucket (it ignored the prefix entirely).
     obj_iter = oss2.ObjectIteratorV2(self.bucket, prefix=path_prefix)
     # Renamed from `iter`, which shadowed the builtin.
     if max_files is not None:
         obj_iter = islice(obj_iter, max_files)
     return map(lambda obj: self._get_name_tuple(obj.key), obj_iter)
Beispiel #2
0
async def _get_aqua_pic():
    """Return a ``(url, picture_id)`` pair for the next shuffled picture.

    Maintains a class-level shuffled queue on ``AquaPicture``; the queue
    is refilled from OSS and reshuffled when it is empty or stale.
    """
    # OSS image-processing suffix: fix orientation, keep quality, force jpg.
    _prefix = '?x-oss-process=image/auto-orient,1/quality,q_100/format,jpg'
    rule_picture_id = re.compile(Auth.bucket_endpoint+Auth.prefix+'/'+'(.*)')

    # Refill when the queue is empty or older than 44.5*600 s (~7.4 h).
    if not AquaPicture.shuffled_list or (time.time()-44.5*600 > AquaPicture.last_shuffle_time):
        AquaPicture.last_shuffle_time = time.time()
        # Rebuild the picture list from the bucket, then shuffle it.
        for obj in oss2.ObjectIteratorV2(Auth.bucket, prefix=Auth.prefix):
            AquaPicture.shuffled_list.append(Auth.bucket_endpoint+str(obj.key))
        # The first listed key is the prefix "directory" placeholder itself,
        # not a picture, so drop it.
        del AquaPicture.shuffled_list[0]
        random.shuffle(AquaPicture.shuffled_list)

    # Take the next picture off the front of the queue.
    url = AquaPicture.shuffled_list.pop(0)
    picture_id = re.match(rule_picture_id, url)[1]
    # GIFs are served untouched; the jpg-conversion suffix would break them.
    if not url.endswith("gif"):
        url = url + _prefix
    return url, picture_id
Beispiel #3
0
async def statsAqua(session) -> None:
    """Send a message reporting how many pictures exist under ``Auth.prefix``.

    Args:
        session: messaging session providing an async ``send`` method.
    """
    # ObjectIteratorV2 is a lazy iterator with no __len__, so count it
    # by consuming it.
    picture_count = sum(
        1 for _ in oss2.ObjectIteratorV2(Auth.bucket, prefix=Auth.prefix))

    _msg = {
        "type": "text",
        "data": {
            "text": "挖藕! 现在有{0}张夸图!".format(picture_count)
        }
    }
    await session.send(_msg)
Beispiel #4
0
def oss_scandir(dirname: path_type):
    """Yield an ``OSSFileEntry`` for each direct child of *dirname*.

    Immediate sub-directories (inferred from key prefixes) are yielded
    once each with a placeholder stat; files carry their real size and
    modification time from OSS.
    """
    dirname = stringify_path(dirname)
    if not dirname.endswith("/"):
        dirname += "/"
    bucket, key, access_key_id, access_key_secret, end_point = parse_osspath(
        dirname)
    oss_bucket = _get_oss_bucket(bucket, access_key_id, access_key_secret,
                                 end_point)
    seen_dirs = set()
    for obj in oss2.ObjectIteratorV2(oss_bucket, prefix=key):
        relative = obj.key[len(key):]
        head, sep, _rest = relative.partition("/")
        if sep:
            # Object sits inside a subdirectory; emit that directory once.
            if head in seen_dirs:
                continue
            seen_dirs.add(head)
            full_path = os.path.join(dirname, head)
            yield OSSFileEntry(
                full_path,
                is_dir=True,
                is_file=False,
                stat={
                    "name": full_path,
                    "type": "directory",
                    "size": 0,
                    "modified_time": -1,
                },
            )
        else:
            # Direct child file of *dirname*.
            full_path = os.path.join(dirname, relative)
            yield OSSFileEntry(
                full_path,
                is_dir=False,
                is_file=True,
                stat={
                    "name": full_path,
                    "type": "file",
                    "size": obj.size,
                    "modified_time": obj.last_modified,
                },
            )
Beispiel #5
0
 def ls(self, path: path_type) -> List[path_type]:
     """Return ``oss://`` URLs for every object below the directory *path*.

     Raises:
         OSError: if *path* refers to a file rather than a directory.
     """
     if not oc.OSSFileEntry(path).is_dir():
         raise OSError("ls for file is not supported")
     bucket, key, access_key_id, access_key_secret, end_point \
         = oc.parse_osspath(path)
     oss_bucket = oss2.Bucket(auth=oss2.Auth(
         access_key_id=access_key_id,
         access_key_secret=access_key_secret),
                              endpoint=end_point,
                              bucket_name=bucket,
                              connect_timeout=_oss_time_out)
     # Keys ending in "/" are directory placeholders — skip them.
     return [rf"oss://{bucket}/{obj.key}"
             for obj in oss2.ObjectIteratorV2(oss_bucket, prefix=key)
             if not obj.key.endswith('/')]
Beispiel #6
0
def oss_isdir(path: path_type):
    """
    OSS has no concept of directories, but we define
    a ossurl is dir, When there is at least one object
    at the ossurl that is the prefix(end with char "/"),
    it is considered as a directory.
    """
    dirname = stringify_path(path)
    if not dirname.endswith("/"):
        dirname += "/"
    bucket, key, access_key_id, access_key_secret, end_point = parse_osspath(
        dirname)
    oss_bucket = _get_oss_bucket(bucket, access_key_id, access_key_secret,
                                 end_point)
    # max_keys=2 suffices: any key besides the placeholder itself proves
    # the prefix is a directory.
    return any(
        obj.key != key
        for obj in oss2.ObjectIteratorV2(oss_bucket, prefix=key, max_keys=2))
Beispiel #7
0
    # NOTE(review): tail of a function whose `def` line is outside this
    # chunk — code left byte-identical, comments only.
    # Load the freshly rebuilt manifest and the previous one.
    with open(rebuild_file, 'r') as FOBJ:
        new_sha = json.load(FOBJ)
    with open('sha256-old.json', 'r') as FOBJ:
        old_sha = json.load(FOBJ)

    # Keys present in the new manifest but absent from the old one.
    dif = new_sha.keys() - old_sha.keys()
    # Write one new/changed path per line.
    with open('sha256.diff', 'w', encoding='utf-8') as FOBJ:
        for i in iter(dif):
            FOBJ.write(i + '\n')


if __name__ == '__main__':
    sha256_to_files = {}
    err_files = []
    r_oss = OssOperation()
    # Walk every object under the remote base dir and record its sha256.
    for obj in oss2.ObjectIteratorV2(bucket, prefix=config.remote_base_dir):
        remote_key = obj.key
        if remote_key[-1] == '/':
            # key ends with "/" — a directory placeholder, not a file
            continue
        sha256 = get_remote_sha256(remote_key)
        if sha256:
            sha256_to_files[remote_key[11:]] = sha256
        else:
            # No sha256 available for this object — report it later.
            err_files.append(remote_key)
    # Persist the rebuilt manifest locally, then upload it.
    with open(rebuild_file, 'w') as fobj:
        json.dump(sha256_to_files, fobj)
    r_oss.encrypt_and_upload_files(rebuild_file, "sha256/%s.json" % config.remote_base_dir[:-1], compare_sha256_before_uploading=True, storage_class='Standard')
    print(err_files)
    print("无sha256的文件总数:" + str(len(err_files)))
    if config.SCT_Send_Key:
        sct_push("[rebuild-sha256]重建完成", "#### sha256.json已重建完成,请登录服务器检查")
Beispiel #8
0
import oss2
from oss_utils.bucket import learn_oss_bucket, my_secret, my_key, endpoint, bucket_name
authv2 = oss2.AuthV2(my_key, my_secret)
bucketv2 = oss2.Bucket(authv2, endpoint, bucket_name)

# Upload a small sample object so the listing below has at least one hit.
bucketv2.put_object("05232055.txt", b"abcdef05232055")

# Collect every Standard-class object under 1 MiB and print its contents.
non_archived_small_files = []
for obj in oss2.ObjectIteratorV2(bucketv2):
    if obj.size < 1024 * 1024 and obj.storage_class == "Standard":
        non_archived_small_files.append(obj.key)
        print(bucketv2.get_object(obj.key).read())