Example #1
def display_newpictures(objid):
    pictures = []
    obj = models.t_product_develop_ing.objects.get(id__exact=objid)
    if (obj.MainSKU is None or obj.MainSKU.strip() == ''
            or obj.SourcePicPath2 is None or obj.SourcePicPath2.strip() == ''):
        #pictures = models.t_product_pictures.objects.filter(TradeID=objid,SourcePicPath__isnull = False)
        auth = oss2.Auth(ACCESS_KEY_ID, ACCESS_KEY_SECRET)
        bucket = oss2.Bucket(auth, ENDPOINT, BUCKETNAME_NOMAINSKU)
        #pictures = oss2.ObjectIterator(bucket)

        for object_info in oss2.ObjectIterator(bucket, prefix='%s/' % (objid)):
            pictures.append(
                '%s%s.%s/%s' %
                (PREFIX, BUCKETNAME_NOMAINSKU, ENDPOINT_OUT, object_info.key))

    else:
        MainSKU = obj.MainSKU
        path_az = re.sub('[^a-zA-Z]', '', MainSKU)  # keep only the letters
        path_09 = re.sub(r"\D", "", MainSKU)  # keep only the digits
        bucket_name = 'fancyqube-%s' % (path_az.lower())
        oss2auth = oss2.Auth(ACCESS_KEY_ID, ACCESS_KEY_SECRET)
        bucket = oss2.Bucket(oss2auth, ENDPOINT, bucket_name)
        # append the sub-SKU
        path = path_az
        for s0 in path_09:
            path = u'%s/%s' % (path, s0)

        path = u'%s/%s' % (path, obj.MainSKU)

        for object_info in oss2.ObjectIterator(bucket, prefix='%s/' % (path)):
            pictures.append(
                '%s%s.%s/%s' %
                (PREFIX, bucket_name, ENDPOINT_OUT, object_info.key))

    return {'pictures': pictures}
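Most of the examples in this collection follow the same listing pattern: build an Auth, open a Bucket, then walk it with ObjectIterator. A minimal self-contained sketch of that pattern, with placeholder credentials, endpoint, and bucket name standing in for real values:

import oss2

# Placeholders -- substitute real credentials, endpoint, and bucket name.
auth = oss2.Auth('<ACCESS_KEY_ID>', '<ACCESS_KEY_SECRET>')
bucket = oss2.Bucket(auth, 'oss-cn-hangzhou.aliyuncs.com', '<bucket-name>')

# ObjectIterator pages through ListObjects results transparently;
# prefix restricts the listing to keys starting with the given string.
for info in oss2.ObjectIterator(bucket, prefix='images/'):
    print(info.key, info.size)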
Example #2
def run_fileLoad(workQueue, queueLock):
  clearBuffer()
  bucket = oss2.Bucket(oss2.Auth(os.getenv('OSS2_ACCESS_KEY_ID'), os.getenv('OSS2_ACCESS_KEY_SECRET')), 'http://oss-cn-qingdao.aliyuncs.com', 'runffphoto')
  # Retrieve the project folder list
  proj_folder_list = oss2.ObjectIterator(bucket, prefix=aliyun_images, delimiter='/')
  for projInfo in proj_folder_list:
    print(projInfo.key)
    if projInfo.key != aliyun_images:
      configPath = projInfo.key+'config.txt'
      localConfigPath = '/usr/share/openalpr/runtime_data/postprocess/us.patterns'
      if bucket.object_exists(configPath):
        # Fill in the Queue
        queueLock.acquire()
        object_list = oss2.ObjectIterator(bucket, prefix=projInfo.key)
        object_list_length = 0
        for objectInfo in object_list:
          if os.path.splitext(objectInfo.key)[1] == '.jpg':
            workQueue.put(objectInfo.key)
            object_list_length += 1
        if object_list_length > 0:
          # copy config.txt to the openalpr runtime config
          bucket.get_object_to_file(configPath, localConfigPath)
        queueLock.release()
        # Wait for queue to empty
        if object_list_length != 0:
          while not workQueue.empty():
            time.sleep(5)
  print "run_fileLoad Finished"
Example #3
 def list_object(self, bucket, prefix=None):
     '''List the objects in the bucket.'''
     if prefix is None:
         return enumerate(oss2.ObjectIterator(bucket))
     else:
         return enumerate(oss2.ObjectIterator(bucket, prefix=prefix))
Example #4
 def show_bucket_file(self, bucket):
     """
     展示bucket下的备份文件夹
     下的名字,大小,数量
     :param bucket: 
     :return: 
     """
     for b in oss2.ObjectIterator(bucket, delimiter='/'):
         num = 0
         size = 0
         for obj in oss2.ObjectIterator(bucket, prefix=b.key,
                                        delimiter='/'):
             num = num + 1
             #size = size + bucket.get_object_meta(obj.key).content_length
         print(b.key, num)
Example #5
def list_dir(bucket, _dir):
    #print('\n'.join(info.name for info in oss2.BucketIterator(service)))
    for obj in oss2.ObjectIterator(bucket):
        if _dir:
            if obj.key.find('.') != -1:
                continue
        print(obj.key)
Example #6
    def test_object_iterator(self):
        prefix = self.random_key('/')
        object_list = []
        dir_list = []

        # prepare objects
        for i in range(20):
            object_list.append(prefix + random_string(16))
            self.bucket.put_object(object_list[-1], random_bytes(10))

        # prepare directories
        for i in range(5):
            dir_list.append(prefix + random_string(5) + '/')
            self.bucket.put_object(dir_list[-1] + random_string(5), random_bytes(3))

        # verify
        objects_got = []
        dirs_got = []
        for info in oss2.ObjectIterator(self.bucket, prefix, delimiter='/', max_keys=4):
            if info.is_prefix():
                dirs_got.append(info.key)
            else:
                objects_got.append(info.key)

                result = self.bucket.head_object(info.key)
                self.assertEqual(result.last_modified, info.last_modified)

        self.assertEqual(sorted(object_list), objects_got)
        self.assertEqual(sorted(dir_list), dirs_got)

        delete_keys(self.bucket, object_list)
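The delimiter='/' listing used in this test is how OSS simulates directories: keys that share a prefix up to the delimiter are folded into a single common prefix, and is_prefix() distinguishes those from plain objects. A minimal sketch of the same idea, assuming an existing bucket object:

import oss2

def list_top_level(bucket):
    # Split the bucket root into simulated directories and plain objects.
    dirs, objects = [], []
    for info in oss2.ObjectIterator(bucket, delimiter='/'):
        if info.is_prefix():  # a common prefix, i.e. a "directory"
            dirs.append(info.key)
        else:
            objects.append(info.key)
    return dirs, objects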
Example #7
    def upload(self, path_direct):
        print("**********   上传  *******")
        bucket_input = input('请输入要传入的bucket名:   ')
        print("**************************")
        print("    上传目录下所有文件:")
        dirs = os.listdir(path_direct)
        for file in dirs:
            print(file)
        print("***************************")

        filename = input('Enter the local file name to upload: ')
        cloud_name = input('Enter the object name in the cloud: ')
        bucket = oss2.Bucket(oss2.Auth(self.access_key_id,
                                       self.access_key_secret),
                             self.endpoint,
                             bucket_name=bucket_input)
        with open(oss2.to_unicode(filename), 'rb') as f:
            bucket.put_object(cloud_name, f)
        meta = bucket.get_object_meta(cloud_name)
        if meta:
            print("     上传成功")
            print("     云端所有文件:")
            for i in oss2.ObjectIterator(bucket):
                print(i.key)
        else:
            print("     上传失败")
Example #8
	def cleanup(self):
		"""Clean up old backups."""
		now = datetime.now() + timedelta(hours = 7)
		for rec in self.filtered("days_to_keep"):
			with rec.cleanup_log():
				oldest = self.filename(now - timedelta(days=rec.days_to_keep))

				if rec.method == "local":
					for name in iglob(os.path.join(rec.folder,
												   "*.dump.zip")):
						if os.path.basename(name) < oldest:
							os.unlink(name)

				elif rec.method == "sftp":
					with rec.sftp_connection() as remote:
						for name in remote.listdir(rec.folder):
							if (name.endswith(".dump.zip") and
									os.path.basename(name) < oldest):
								remote.unlink('%s/%s' % (rec.folder, name))

				elif rec.method == "oss":
					bucket = rec.oss_connection()						
					for obj in oss2.ObjectIterator(bucket):
						name = str(obj.key)
						if (name.startswith(rec.oss_folder) and name.endswith(".dump.zip") and os.path.basename(name) < oldest):
							bucket.delete_object(obj.key)
Example #9
def iter_files():
    marker = current_progress['marker']

    start_marker = cnf.get('START_MARKER', '')
    if start_marker > marker:
        marker = start_marker

    end_marker = cnf.get('END_MARKER', None)

    for file_object in oss2.ObjectIterator(oss2_bucket,
                                           prefix=cnf['PREFIX'],
                                           marker=marker):

        if end_marker and file_object.key > end_marker:
            break

        yield file_object

        current_progress['total_n'] += 1
        current_progress['total_size'] += file_object.size
        current_progress['marker'] = file_object.key

        if current_progress['total_n'] % 10000 == 0:
            store_progress()

    store_progress()
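iter_files is restartable because ObjectIterator's marker argument resumes the listing strictly after the given key, and keys come back in lexicographic order, so persisting the last key seen is enough to pick up an interrupted scan. A minimal sketch of the same idea, assuming a bucket object and a previously stored marker:

import oss2

def resume_listing(bucket, prefix, saved_marker=''):
    # With marker set, listing starts after saved_marker instead of at
    # the beginning of the prefix.
    for info in oss2.ObjectIterator(bucket, prefix=prefix, marker=saved_marker):
        yield info.key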
Example #10
def listImgInOss():
    auth = oss2.Auth(accessKeyId, accessKeySecret)
    bucket = oss2.Bucket(auth, endpoint, bucketname)
    print(bucket.get_bucket_location().location)
    for b in islice(oss2.ObjectIterator(bucket), 10):
        print(b.key)
        print(b.size)
Example #11
def clean_and_delete_bucket(bucket):
    # check if bucket is in versioning status
    try:
        result = bucket.get_bucket_info()
        if result.versioning_status in [oss2.BUCKET_VERSIONING_ENABLE, oss2.BUCKET_VERSIONING_SUSPEND]:
            all_objects = bucket.list_object_versions()
            for obj in all_objects.versions:
                bucket.delete_object(obj.key, params={'versionId': obj.versionid})
    except:
        pass
    
    # list all upload_parts to delete
    up_iter = oss2.MultipartUploadIterator(bucket)
    for up in up_iter:
        bucket.abort_multipart_upload(up.key, up.upload_id)

    # list all objects to delete
    obj_iter = oss2.ObjectIterator(bucket)
    for obj in obj_iter:
        bucket.delete_object(obj.key)
    
    # list all live channels to delete
    for ch_iter in oss2.LiveChannelIterator(bucket):
        bucket.delete_live_channel(ch_iter.name)

    # delete_bucket
    bucket.delete_bucket()
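delete_bucket only succeeds once the bucket is completely empty, which is why the helper above first clears object versions, unfinished multipart uploads, plain objects, and live channels. A minimal usage sketch, with placeholder credentials:

import oss2

auth = oss2.Auth('<ACCESS_KEY_ID>', '<ACCESS_KEY_SECRET>')
bucket = oss2.Bucket(auth, 'oss-cn-hangzhou.aliyuncs.com', '<bucket-name>')
try:
    clean_and_delete_bucket(bucket)
except oss2.exceptions.OssError as e:
    # e.g. BucketNotEmpty if objects were written concurrently
    print('cleanup failed:', e)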
Example #12
    def to_excel(self, request, queryset):
        path = MEDIA_ROOT + 'download_xls/' + request.user.username
        mkdir_p(MEDIA_ROOT + 'download_xls')
        os.popen('chmod 777 %s' % (MEDIA_ROOT + 'download_xls'))
        mkdir_p(path)
        os.popen('chmod 777 %s' % path)

        w = Workbook()
        sheet = w.add_sheet('actionable_order')
        sheet.write(0, 0, u'Shop')
        sheet.write(0, 1, u'Shop SKU')
        sheet.write(0, 2, u'Order ID')
        sheet.write(0, 3, u'Purchase date')
        sheet.write(0, 4, u'Payment date')
        sheet.write(0, 5, u'Promise date')
        sheet.write(0, 6, u'Days past promise date')
        sheet.write(0, 7, u'Quantity purchased')
        sheet.write(0, 8, u'Quantity shipped')
        sheet.write(0, 9, u'Quantity to ship')
        sheet.write(0, 10, u'Ship service level')
        sheet.write(0, 11, u'Refresh time')

        # write the data rows
        row = 0
        for qs in queryset:
            row = row + 1
            purchase_date = qs.purchase_date.strftime('%Y-%m-%d %H:%M')
            payments_date = qs.payments_date.strftime('%Y-%m-%d %H:%M')
            promise_date = qs.promise_date.strftime('%Y-%m-%d %H:%M')
            refresh_time = qs.refresh_time.strftime('%Y-%m-%d %H:%M')
            excel_content_list = (qs.shop_name, qs.sku, qs.order_id,
                                  purchase_date, payments_date, promise_date,
                                  qs.days_past_promise, qs.quantity_purchased,
                                  qs.quantity_shipped, qs.quantity_to_ship,
                                  qs.ship_service_level, refresh_time)
            column = 0
            for content in excel_content_list:
                sheet.write(row, column, content)
                column += 1
        filename = request.user.username + '_' + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + '.xls'
        w.save(path + '/' + filename)
        os.popen(r'chmod 777 %s' % (path + '/' + filename))

        # upload the file to OSS
        auth = oss2.Auth(ACCESS_KEY_ID, ACCESS_KEY_SECRET)
        bucket = oss2.Bucket(auth, ENDPOINT, BUCKETNAME_XLS)
        bucket.create_bucket(oss2.BUCKET_ACL_PUBLIC_READ)
        # delete any existing exports first
        for object_info in oss2.ObjectIterator(
                bucket,
                prefix='%s/%s_' % (request.user.username,
                                   request.user.username)):
            bucket.delete_object(object_info.key)
        bucket.put_object(u'%s/%s' % (request.user.username, filename),
                          open(path + '/' + filename, 'rb'))
        messages.success(
            request, u'%s%s.%s/%s/%s' % (PREFIX, BUCKETNAME_XLS, ENDPOINT_OUT,
                                         request.user.username, filename) +
            u': exported successfully; click Download to save it locally.')
Example #13
def send(event, context):
    host = "http://websocket.serverless.fun"
    url = "/notify"
    userId = json.loads(event.decode("utf-8"))['headers']['x-ca-deviceid']

    # fetch the stored connection objects
    for obj in oss2.ObjectIterator(ossClient):
        if obj.key != userId:
            req_post = request.Request(host=host,
                                       protocol=constant.HTTP,
                                       url=url,
                                       method="POST",
                                       time_out=30000,
                                       headers={'x-ca-deviceid': obj.key})
            req_post.set_body(json.dumps({
                "from": userId,
                "message": base64.b64decode(json.loads(event.decode("utf-8"))['body']).decode("utf-8")
            }))
            req_post.set_content_type(constant.CONTENT_TYPE_STREAM)
            result = apigatewayClient.execute(req_post)
            print(result)
            if result[0] != 200:
                # delete the connection record
                ossClient.delete_object(obj.key)
    return {
        'isBase64Encoded': 'false',
        'statusCode': '200',
        'body': {
            'status': "ok"
        },
    }
Example #14
    def _list_paths(self, path_info):
        import oss2

        for blob in oss2.ObjectIterator(
            self.oss_service, prefix=path_info.path
        ):
            yield blob.key
Example #15
def compare_md5():
    for oss_obj in oss2.ObjectIterator(oss_bucket, prefix='upyun'):
        if db.ok.find_one({'aliyun': oss_obj.key}):
            continue

        try:
            md5_aliyun = oss_bucket.head_object(
                oss_obj.key).headers['Content-MD5']
        except:
            db.wrong_ali.insert_one({'url': oss_obj.key})
            logger.error('error occurs when get %s Content-Md5', oss_obj.key)
            continue
        md5_aliyun = binascii.hexlify(
            base64.decodebytes(md5_aliyun.encode('utf-8'))).decode()

        url = upyun_base_url + oss_obj.key[6:]
        try:
            md5_upyun = requests.head(url, auth=(UPYUN,
                                                 UPYUN)).headers['Content-Md5']
        except:
            db.wrong_upy.insert_one({'url': url})
            logger.error('error occurs when get %s Content-Md5', url)
            continue
        if md5_aliyun != md5_upyun:
            db.pic_diff.insert_one({'aliyun': oss_obj.key, 'upyun': url})
            logger.error('different md5 %s %s', oss_obj.key, url)
            continue
        db.ok.insert_one({'aliyun': oss_obj.key})
        logger.debug('%s is ok', oss_obj.key)
Example #16
 def genfilelist(self, staticfiledir):
     myownlist = []
     for b in oss2.ObjectIterator(self.bucket,
                                  prefix=staticfiledir,
                                  max_keys=500):
         myownlist.append(b.key)
     return myownlist
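Note that max_keys only sets the page size of each underlying ListObjects request; the iterator still walks every key under the prefix, so genfilelist can return more than 500 entries. To cap the number of results, slice the iterator as Example #10 does with islice. A minimal sketch:

import itertools
import oss2

def first_n_keys(bucket, prefix, n):
    # max_keys tunes request granularity; islice enforces the actual cap.
    it = oss2.ObjectIterator(bucket, prefix=prefix, max_keys=100)
    return [info.key for info in itertools.islice(it, n)]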
Example #17
def auto_upgrade():
    if is_windows_system():
        import time
        import os
        import sys
        import win32api
        import oss2
        access_key_id = '<ACCESS_KEY_ID>'          # credential redacted
        access_key_secret = '<ACCESS_KEY_SECRET>'  # credential redacted
        endpoint_out = 'oss-cn-shanghai.aliyuncs.com'
        bucket_name_api_version = 'fancyqube-apiversion'
        print('this file is: %s' % str(sys.argv[0]))
        logger.debug('this file is: %s' % str(sys.argv[0]))
        auth = oss2.Auth(access_key_id, access_key_secret)
        bucket = oss2.Bucket(auth, endpoint_out, bucket_name_api_version)
        for filename2 in oss2.ObjectIterator(bucket, prefix='amazon_upload_product-'):
            if sys.argv[0].split('\\')[-1] < filename2.key:
                print('The file in oss: %s is newer than the current file; downloading and running it.' % str(filename2.key))
                logger.debug('The file in oss: %s is newer than the current file; downloading and running it.' % str(filename2.key))
                bucket.get_object_to_file(filename2.key, LOCAL_PATH + filename2.key)
                if win32api.ShellExecute(0, 'open', LOCAL_PATH + filename2.key, '', '', 3) > 32:
                    print('Run the new file and close the current one.')
                    logger.debug('Run the new file and close the current one.')
                    os._exit(0)
                else:
                    print('Downloaded the new file, but cannot run it!')
                    logger.error('Downloaded the new file, but cannot run it!')
            else:
                print('The file in oss: %s is older than the current file; ignoring it.' % str(filename2.key))
                logger.debug('The file in oss: %s is older than the current file; ignoring it.' % str(filename2.key))
    else:
        pass
Example #18
    def to_excel(self, request, objs):
        from xlwt import *
        path = MEDIA_ROOT + 'download_xls/' + request.user.username
        mkdir_p(MEDIA_ROOT + 'download_xls')
        os.popen('chmod 777 %s' % (MEDIA_ROOT + 'download_xls'))

        mkdir_p(path)
        os.popen('chmod 777 %s' % (path))
        workbook = Workbook()
        sheet1 = workbook.add_sheet(u'sheet1')  # create the sheet
        row0 = [
            u'Main SKU', u'Buyer', u'Unit cost', u'Processing fee',
            u'Offset printing', u'Digital printing', u'Digital cut pieces',
            u'Watermark', u'Zipper', u'Cap rope/ball', u'Button', u'Elastic',
            u'Eyelet/snap button', u'Leather tag', u'Lace', u'Webbing',
            u'Special machine', u'Piping', u'Burn-out print', u'Heat transfer',
            u'Fabric A', u'Supplier A address', u'Dosage A', u'Color A',
            u'Fabric B', u'Supplier B address', u'Dosage B', u'Color B',
            u'Fabric C', u'Supplier C address', u'Dosage C', u'Color C',
            u'Other'
        ]
        datalist = []

        for obj in objs:
            datalist.append([
                obj.MainSKU, obj.Buyer, obj.CostPrice, obj.ProcessCosts,
                obj.OffsetPrinting, obj.DigitalPrinting, obj.DigitalCuts,
                obj.Watermark, obj.Zipper, obj.Cap_rope_ball, obj.Button,
                obj.Elastic, obj.Cornseye_SnapButton, obj.LeatherCard,
                obj.Lace, obj.Webbing, obj.ZhuanJi, obj.LaTiao, obj.ShaoHua,
                obj.TangTu, obj.A_fabric, obj.A_address, obj.A_dosage,
                obj.A_color, obj.B_fabric, obj.B_address, obj.B_dosage,
                obj.B_color, obj.C_fabric, obj.C_address, obj.C_dosage,
                obj.C_color, obj.Other
            ])
        for i in range(0, len(row0)):
            sheet1.write(0, i, row0[i])
        for row, rowdata in enumerate(datalist):
            row = row + 1
            for j in range(0, len(row0)):
                sheet1.write(row, j, rowdata[j])

        filename = request.user.username + '_' + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + '.xls'
        workbook.save(path + '/' + filename)
        os.popen(r'chmod 777 %s' % (path + '/' + filename))

        # upload the file to OSS
        auth = oss2.Auth(ACCESS_KEY_ID, ACCESS_KEY_SECRET)
        bucket = oss2.Bucket(auth, ENDPOINT, BUCKETNAME_XLS)
        bucket.create_bucket(oss2.BUCKET_ACL_PUBLIC_READ)
        # delete any existing exports first
        for object_info in oss2.ObjectIterator(
                bucket,
                prefix='%s/%s_' % (request.user.username,
                                   request.user.username)):
            bucket.delete_object(object_info.key)
        bucket.put_object(u'%s/%s' % (request.user.username, filename),
                          open(path + '/' + filename, 'rb'))

        messages.error(
            request, u'%s%s.%s/%s/%s' % (PREFIX, BUCKETNAME_XLS, ENDPOINT_OUT,
                                         request.user.username, filename) +
            u': exported successfully; click Download to save it locally.')
Example #19
def import_img_resource():
    directory = get_form_param('Directory', not_none=True)
    config = Config.query.filter().first()
    access_key_id = config and config.accessKeyId or current_app.config[
        'ACCESS_KEY_ID']
    access_key_secret = config and config.accessKeySecret or current_app.config[
        'ACCESS_KEY_SECRET']
    oss_access_endpoint = config and config.ossAccessEndpoint or current_app.config[
        'OSS_TEST_ACCESS_ENDPOINT']
    oss_access_bucket = config and config.ossAccessBucket or current_app.config[
        'OSS_TEST_ACCESS_BUCKET']
    auth = oss2.Auth(access_key_id, access_key_secret)
    bucket = oss2.Bucket(auth, oss_access_endpoint, oss_access_bucket)
    # resource_list = []
    for obj in oss2.ObjectIterator(bucket, prefix=directory):
        # use is_prefix() to tell whether obj is a directory
        src_type = obj.key.split('.')[-1]
        if src_type in current_app.config['ALLOWED_EXTENSIONS']:
            if db.session.query(Information).filter_by(
                    imgPath=obj.key).first() is None:
                new_info = Information()
                new_info.imgPath = obj.key
                new_info.imgDirectory = directory
                db.session.add(new_info)
    db.session.commit()
    return jsonify({'Code': 'Success', 'Message': 'Success'})
Example #20
    def get(self, timestamp_a, timestamp_b, data_type, target_path):
        """
        get baiduqx data between @param:timestamp_a and @param:timestamp_b at @param:city
        @param(str:yyyy_mm_dd):timestamp_a
        @param(str:yyyy_mm_dd):timestamp_b
        @param(str):data_type
        """
        time_a = [int(i) for i in timestamp_a.split("_")]
        time_b = [int(i) for i in timestamp_b.split("_")]
        timestamp_a = datetime.date(time_a[0], time_a[1], time_a[2])
        timestamp_b = datetime.date(time_b[0], time_b[1], time_b[2])

        files = [obj.key for obj in oss2.ObjectIterator(self.bucket)]
        res_files = []
        for obj_file in files:
            creation_time = obj_file.split("_")[-1].strip(".json")
            creation_times = creation_time.split("-")
            obj_data_type = obj_file.split("_")[1]
            timestamp_created = datetime.date(int(creation_times[0]),
                                              int(creation_times[1]),
                                              int(creation_times[2]))
            if timestamp_created >= timestamp_a and timestamp_created <= timestamp_b:
                if obj_data_type == data_type:
                    res_files.append(obj_file)

        for obj_file in res_files:
            object_stream = self.bucket.get_object(obj_file)
            with open(os.path.join(target_path, obj_file),
                      'wb') as local_fileobj:
                shutil.copyfileobj(object_stream, local_fileobj)
        return
Example #21
    def listDir(self, dir1, timeRange=None):
        """
        :param dir1:downData/www_tianyancha_com/detail/company_1
        :param timeRange: fmt:2018020100-2018020200
        :return:
        """
        objects = []
        tryTime = 0

        def getNameTS(name):
            m = re.search(r"(\d{4})/(\d{2})/(\d{2})/(\d{2})", name)
            return "%s%s%s%s" % (m.group(1), m.group(2), m.group(3),
                                 m.group(4))

        while tryTime < 3:
            try:
                dir1 = preProcessDir(dir1)
                # list all objects under the "fun/" directory of the bucket
                beginTime, endTime = timeRange.split("-") if timeRange else (
                    None, None)
                import oss2
                for idx, object_info in enumerate(
                        oss2.ObjectIterator(self.oss, prefix=dir1)):
                    if beginTime and endTime:
                        ts = getNameTS(object_info.key)
                        if ts < beginTime or ts >= endTime:
                            # logDebug("ignore:%s"%object_info.key)
                            continue
                    objects.append(object_info.key)
                    logDebug("%s:%s" % (idx, object_info.key))
                return objects
            except Exception as e:
                logException()
            tryTime += 1
            time.sleep(1)
        return objects
Example #22
    def to_excel(self, request, queryset):
        from xlwt import *
        path = MEDIA_ROOT + 'download_xls/' + request.user.username
        # if not os.path.exists(path):
        mkdir_p(MEDIA_ROOT + 'download_xls')
        os.popen('chmod 777 %s' % (MEDIA_ROOT + 'download_xls'))

        mkdir_p(path)
        os.popen('chmod 777 %s' % (path))

        w = Workbook()
        sheet = w.add_sheet('feed_trackNo_to_py')

        sheet.write(0, 0, u'Order number')
        sheet.write(0, 1, u'Shop order number')
        sheet.write(0, 2, u'Tracking number')

        # write the data rows
        row = 0
        for qs in queryset:
            if qs.pyOrderNumber:
                row = row + 1
                column = 0
                sheet.write(row, column, qs.pyOrderNumber)

                column = column + 1
                sheet.write(row, column, '')

                t_order_track_info_amazon_india_objs = t_order_track_info_amazon_india.objects.filter(
                    AmazonOrderId=qs.AmazonOrderId)
                trackNo = ''
                if t_order_track_info_amazon_india_objs:
                    if isinstance(t_order_track_info_amazon_india_objs,list):
                        trackNo = t_order_track_info_amazon_india_objs[0][0].trackNumber
                    else:
                        trackNo = t_order_track_info_amazon_india_objs[0].trackNumber

                column = column + 1
                sheet.write(row, column, trackNo)
            else:
                messages.error(request, u'Export error: order %s is missing a Puyuan order number' % qs.AmazonOrderId)

        filename = request.user.username + '_' + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + '.xls'
        w.save(path + '/' + filename)
        os.popen(r'chmod 777 %s' % (path + '/' + filename))

        # upload the file to OSS
        auth = oss2.Auth(ACCESS_KEY_ID, ACCESS_KEY_SECRET)
        bucket = oss2.Bucket(auth, ENDPOINT, BUCKETNAME_XLS)
        bucket.create_bucket(oss2.BUCKET_ACL_PUBLIC_READ)
        # delete any existing exports first
        for object_info in oss2.ObjectIterator(bucket,
                                               prefix='%s/%s_' % (request.user.username, request.user.username)):
            bucket.delete_object(object_info.key)
        bucket.put_object(u'%s/%s' % (request.user.username, filename), open(path + '/' + filename, 'rb'))

        messages.error(request, u'%s%s.%s/%s/%s' % (PREFIX, BUCKETNAME_XLS, ENDPOINT_OUT, request.user.username,
                                                    filename) + u': exported successfully; click Download to save it locally.')
Example #23
def prefix_all_list(bucket, bucket_svg, prefix):
    # print("start listing all files under " + prefix)
    oss_file_size = 0
    for obj in oss2.ObjectIterator(bucket, prefix='%s/' % prefix):
        # print(' key : ' + obj.key)
        oss_file_size = oss_file_size + 1
        download_to_local(bucket, bucket_svg, obj.key, obj.key)
Example #24
 def test_object_iterator_chinese(self):
     for prefix in [self.random_key('中+文'), self.random_key(u'中+文')]:
         self.bucket.put_object(prefix, b'content of object')
         object_got = list(
             oss2.ObjectIterator(self.bucket, prefix=prefix,
                                 max_keys=1))[0].key
         self.assertEqual(to_string(prefix), object_got)
Example #25
def get_oss_version(project=None):
    try:
        versions = []
        if project:
            tt = time.strftime('%Y', time.localtime())
            auth = oss2.Auth(oss_id, oss_key)
            bucket = oss2.Bucket(auth, oss_url, 'xxxxops')
            for obj in oss2.ObjectIterator(bucket):
                if obj.key.endswith('.war') or obj.key.endswith(
                        '.tar.gz') or obj.key.endswith('.jar'):
                    if obj.key.split('/')[-1].startswith(project):
                        try:
                            ver = obj.key.split(tt)[-1].split('-')
                            version = int('%s%s' % (tt, ver[0]))
                            version = '%s-%s' % (version, ver[1].split('.')[0])
                            versions.append(version)
                        except:
                            pass
            versions = list(set(versions))
            versions.sort(reverse=True)
            if len(versions) > 10:
                versions = versions[:10]
    except Exception as e:
        logging.error(e)
    finally:
        return jsonify({project: versions})
Example #26
def get_blogs(dir):
    bloglists = []
    blogmums = 0

    bloglist_and_nums = cache.get("allblogs")
    if bloglist_and_nums is not None:
        return bloglist_and_nums.get("bloglists"), bloglist_and_nums.get(
            "blognum")

    for object_info in oss2.ObjectIterator(buckets):
        filedata = dict()
        filename = object_info.key
        isexist = filename.find(dir)
        if isexist >= 0:
            if filename == dir:
                continue
            else:
                filecontent = buckets.get_object(filename).read()
                file_metadata = get_mk_metadata(filecontent)
                filedata['filename'] = filename.split('/')[1].split(".")[0]
                filedata['createtimes'] = get_unixtimestamp(
                    file_metadata.metadata.get('Date'))
                filedata['filepath'] = filename
                filedata['Title'] = file_metadata.metadata.get("Title")
                filedata["Date"] = file_metadata.metadata.get('Date')
                filedata["Summary"] = file_metadata.metadata.get('Summary')
                bloglists.append(filedata)
                blogmums = blogmums + 1

    def createtime(s):
        return s['createtimes']

    bloglists = sorted(bloglists, key=createtime, reverse=True)
    cache.set("allblogs", {'bloglists': bloglists, 'blognum': blogmums})
    return bloglists, blogmums
Example #27
    def get_files(self, path):
        def _strip_path(_name, _path):
            if _name.startswith(_path):
                return _name.replace(_path, '', 1)
            return _name

        def _remove_trailing_slash(_name):
            return _name[:-1]

        def _iso_to_epoch(timestamp):
            dt = time.localtime(timestamp)
            return int(time.mktime(dt))

        files = []
        directories = []
        if path and not path.endswith(self.separator):
            path += self.separator
        for obj in oss2.ObjectIterator(bucket=self.bucket,
                                       prefix=path,
                                       delimiter=self.separator):
            if obj.key == path:
                continue
            if obj.is_prefix():
                name = _remove_trailing_slash(_strip_path(obj.key, path))
                key_name = _remove_trailing_slash(obj.key)
                directories.append((name, key_name, True, 0, 0))
            else:
                last_modified = _iso_to_epoch(obj.last_modified)
                name = _strip_path(obj.key, path)
                files.append((name, obj.key, False, obj.size, last_modified))
        return directories + files
Example #28
    def upload_to_oss_forthwith(self, params):
        result = {
            'errorcode': 0,
            'errortext': '',
            'params': params,
            'result': ''
        }

        num = 4
        filelist = []
        for file in oss2.ObjectIterator(self.bucket,
                                        prefix='%s' % params['path']):
            filelist.append(file.key)
        length = len(filelist)
        if length > num:
            for key in filelist:
                self.bucket.delete_object(key)
                length = length - 1
                if length == num:
                    break

        self.bucket.put_object(u'%s/%s' % (params['path'], params['name']),
                               params['byte'])
        result['result'] = (PREFIX + self.bucketName + '.' + ENDPOINT_OUT +
                            '/' + params['path'] + '/' + params['name'])

        return result
Example #29
    def _list_paths(self, prefix, progress_callback=None):
        import oss2

        for blob in oss2.ObjectIterator(self.oss_service, prefix=prefix):
            if progress_callback:
                progress_callback()
            yield blob.key
Example #30
    def __sync_file(self, dirname):
        utils.mkdir(dirname)
        try:
            os.chdir(dirname)
        except:
            logger.error('No such directory: {0}'.format(dirname))
            return

        for obj in oss2.ObjectIterator(self.bucket):
            if obj.key[-1] == '/':
                utils.mkdir(obj.key)
                continue
            else:
                #timestr = datetime.utcfromtimestamp(obj.last_modified).strftime('%Y-%M-%d %H:%M:%S')
                if os.path.exists(obj.key):
                    logger.info('File already downloaded: ' + obj.key)
                    continue
                # Start to download file
                try:
                    self.bucket.get_object_to_file(
                        obj.key, obj.key, progress_callback=utils.percentage)
                    logger.info('Download file successfully: ' + obj.key)
                except oss2.exceptions.NoSuchKey:
                    logger.warning('No such object key: {0}'.format(obj.key))
                except oss2.exceptions.InconsistentError:
                    continue
                except Exception:
                    logger.error('Unexpected error')
                    exit(1)

        os.chdir(self.exec_dir)