Example #1
    def bucket(self):
        # Create a batch of QR codes and save them in one datastore call.
        qrcodes = []
        for _ in range(BUCKET_SIZE):
            qrcodes.append(QRCode())
        keys = ndb.put_multi(qrcodes)
        keys = Inventory.assemble(keys)
        # Pick a random shard so writes are spread across NUM_SHARDS buckets.
        shard_string_index = str(random.randint(0, NUM_SHARDS - 1))
        bucket = Bucket.get_by_id(shard_string_index)
        if bucket is None:
            bucket = Bucket(id=shard_string_index)
        bucket.headcount += len(keys)
        bucket.qrcodes.extend(keys)  # append rather than overwrite existing keys
        bucket.put()
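
These shard snippets rely on a Bucket model that is not shown; a minimal sketch of what its declaration plausibly looks like, with field types inferred from usage and placeholder constants:

from google.appengine.ext import ndb

NUM_SHARDS = 20    # assumed; the real constant is defined elsewhere
BUCKET_SIZE = 64   # assumed; the real constant is defined elsewhere

class Bucket(ndb.Model):
    # One shard holding spare QRCode keys plus a running count.
    headcount = ndb.IntegerProperty(default=0)
    qrcodes = ndb.KeyProperty(repeated=True)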
Example #2
    def create_instance(self, body_data):
        """
        重载修改后:
        *.将password字段加密

        :param body_data: 创建对象时的传入数据
        :return: instance: 创建操作后的对象
        """
        password = generate_password_hash(body_data["password"],
                                          method="pbkdf2:sha256")
        body_data["password"] = password
        body_data["creator_id"] = g.login_user.id
        if "img_url" not in body_data:
            body_data[
                "img_url"] = "https://gw.alipayobjects.com/zos/rmsportal/BiazfanxmamNRoxxVxka.png"
        # Drop any input attributes the model does not support
        # (iterate over a copy so the dict can be mutated safely)
        for item in list(body_data):
            if not hasattr(self.model, item):
                del body_data[item]
        # Create the object
        user = self.model(**body_data)
        db.session.add(user)
        db.session.commit()

        # Create the space
        space_data = {
            "name": user.username + "'s private space",
            "space_type": "private",
            "own_user_id": user.id
        }
        space = Space(**space_data)
        db.session.add(space)
        db.session.commit()

        # Create the bucket
        bucket_data = {
            "name": space.name + "'s 1st bucket",
            "space_id": space.id
        }
        bucket = Bucket(**bucket_data)
        db.session.add(bucket)
        db.session.commit()

        # Create the space's root folder
        folder_data = {
            "object_name": space.name + "'s root",
            "object_type": "folder",
            "object_size": 0,
            "creator_id": user.id,
            "bucket_id": bucket.id
        }
        folder_root = File(**folder_data)
        db.session.add(folder_root)
        db.session.commit()

        # Link the space to its root folder
        space.root_folder = folder_root
        db.session.commit()

        return user
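
Each step above commits separately, so a failure partway through can leave a user without a space or bucket. A hedged sketch of the same flow under one atomic commit, assuming the same models:

def create_instance_atomic(self, body_data):
    # Hypothetical one-transaction variant of the flow above.
    try:
        user = self.model(**body_data)
        db.session.add(user)
        db.session.flush()                    # assigns user.id, no commit yet
        space = Space(name=user.username + "'s private space",
                      space_type="private", own_user_id=user.id)
        db.session.add(space)
        db.session.flush()
        bucket = Bucket(name=space.name + "'s 1st bucket", space_id=space.id)
        db.session.add(bucket)
        db.session.flush()
        folder_root = File(object_name=space.name + "'s root",
                           object_type="folder", object_size=0,
                           creator_id=user.id, bucket_id=bucket.id)
        db.session.add(folder_root)
        space.root_folder = folder_root
        db.session.commit()                   # everything lands or nothing does
        return user
    except Exception:
        db.session.rollback()
        raise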
Example #3
def get_buckets(inp, summary, cy):

    node, prop = inp.split('.')  # parse the node/prop pair to count by
    bucketl, sortl = [], []  # parallel lists so buckets can be sorted by size

    if summary == "no":  # not a full summary; return only key and doc count
        res = count_props(node, prop, cy)
        for row in res:
            if row['prop'] != "":
                sortl.append(int(row['counts']))
                bucketl.append(Bucket(key=row['prop'], docCount=row['counts']))

        # Sort on the counts only; ties would otherwise try to compare Buckets.
        pairs = sorted(zip(sortl, bucketl), key=lambda p: p[0], reverse=True)
        return BucketCounter(buckets=[bucket for _, bucket in pairs])

    else:  # full summary: case_count, doc_count, file_size, and key
        res = count_props_and_files(node, prop, cy)
        for row in res:
            if row['prop'] != "":
                bucketl.append(SBucket(key=row['prop'],
                                       docCount=row['dcounts'],
                                       fileSize=row['tot'],
                                       caseCount=row['ccounts']))

        return SBucketCounter(buckets=bucketl)
Example #4
    def alloc(self, campaign_key, n):
        keys = []
        remainder = n
        for bucket in Bucket.query():
            available = len(bucket.qrcodes)
            if available >= remainder:
                # This bucket can satisfy the rest of the request.
                keys.extend(bucket.qrcodes[:remainder])
                bucket.qrcodes = bucket.qrcodes[remainder:]
                bucket.headcount -= remainder
                bucket.put()
                # Stamp each allocated QR code with the campaign, then re-save.
                qrcodes = []
                for key in keys:
                    qrcode = key.get()
                    qrcode.campaign = campaign_key
                    qrcodes.append(qrcode)
                keys = ndb.put_multi(qrcodes)
                campaign = campaign_key.get()
                campaign.qrcodes.extend(keys)
                campaign.tally += len(keys)
                campaign.put()
                return keys
            else:
                # Drain this bucket entirely and move on to the next one.
                remainder -= available
                keys.extend(bucket.qrcodes)
                bucket.qrcodes = []
                bucket.headcount = 0
                bucket.put()
        # If total inventory is below n the loop falls through: the drained
        # keys are never assigned, so callers must ensure enough stock.
Example #5
    def clean_up(self):
        # Delete every stored index bucket before rebuilding.
        for bucket in Bucket.query():
            bucket.key.delete()
        for bucket in Snippet_Bucket.query():
            bucket.key.delete()
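
Assuming the standard ndb API, a keys-only query with ndb.delete_multi does the same job without fetching entity bodies; a sketch:

    def clean_up_fast(self):
        # Hypothetical variant: keys-only queries skip loading entity bodies,
        # and delete_multi batches the deletes into fewer RPCs.
        ndb.delete_multi(Bucket.query().fetch(keys_only=True))
        ndb.delete_multi(Snippet_Bucket.query().fetch(keys_only=True))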
Example #6
    def post(self):

        self.write("start building index.")

        self.clean_up()

        # call web crawlers
        seeds = [('http://stackoverflow.com/', 300),
                 ('https://uwaterloo.ca/', 300),
                 ('https://www.reddit.com/', 300),
                 ('https://www.ft.com/', 300)]
        index, graph, snippet_lookup = G_search.crawl_web(seeds, "BFS")

        # split big dictionary into small buckets

        buckets = G_search.convert_to_buckets(index)
        snippet_buckets = G_search.convert_to_buckets(snippet_lookup)
        ranks = G_search.compute_ranks(graph)

        # write objects to the database

        for key in buckets:
            current_bucket = Bucket(owner='public',
                                    hash_code=str(key),
                                    dictionary=buckets[key])
            current_bucket.put()

        for key in snippet_buckets:
            current_bucket = Snippet_Bucket(hash_code=str(key),
                                            dictionary=snippet_buckets[key])
            current_bucket.put()

        curr_ranks = Ranks(owner='public', ranks=ranks)
        curr_ranks.put()
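
Neither G_search.convert_to_buckets nor the hash function it pairs with is shown here; a hypothetical sketch of the partitioning scheme these handlers assume (the same hash must be used at index time and at query time):

def hash_string(keyword, num_buckets):
    # Hypothetical stand-in for G_search.hash_string: any deterministic
    # hash works, as long as lookups use the same function and bucket count.
    return sum(ord(ch) for ch in keyword) % num_buckets

def convert_to_buckets(index, num_buckets=64):
    # Hypothetical sketch: split one large dict into num_buckets smaller
    # dicts so each fits comfortably in a single datastore entity.
    buckets = {k: {} for k in range(num_buckets)}
    for token, value in index.items():
        buckets[hash_string(token, num_buckets)][token] = value
    return buckets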
Example #7
    def get_all_buckets(self):
        # Each subdirectory of self.root is treated as one bucket; its
        # mtime doubles as the creation date in ISO-8601 form.
        buckets = []
        bucket_list = os.listdir(self.root)
        bucket_list.sort()
        for bucket in bucket_list:
            mtime = os.stat(os.path.join(self.root, bucket)).st_mtime
            create_date = datetime.fromtimestamp(mtime).strftime('%Y-%m-%dT%H:%M:%S.000Z')
            buckets.append(Bucket(bucket, create_date))
        return buckets
Example #8
    def create_instance(self, body_data):
        """
        重载修改后:
        *.将password字段加密

        :param body_data: 创建对象时的传入数据
        :return: instance: 创建操作后的对象
        """
        body_data["creator_id"] = g.login_user.id
        # Drop any input attributes the model does not support
        # (iterate over a copy so the dict can be mutated safely)
        for item in list(body_data):
            if not hasattr(self.model, item):
                del body_data[item]
        # Create the object
        group = self.model(**body_data)
        group.users = [g.login_user, ]
        db.session.add(group)
        db.session.commit()

        # Create the space
        space_data = {
            "name": group.name + "'s private space",
            "space_type": "group",
            "own_group_id": group.id
        }
        space = Space(**space_data)
        db.session.add(space)
        db.session.commit()

        # Create the bucket
        bucket_data = {
            "name": space.name + "'s 1st bucket",
            "space_id": space.id
        }
        bucket = Bucket(**bucket_data)
        db.session.add(bucket)
        db.session.commit()

        # Create the space's root folder
        folder_data = {
            "object_name": space.name + "'s root",
            "object_type": "folder",
            "object_size": 0,
            "creator_id": g.login_user.id,
            "bucket_id": bucket.id
        }
        folder_root = File(**folder_data)
        db.session.add(folder_root)
        db.session.commit()

        # Link the space to its root folder
        space.root_folder = folder_root
        db.session.commit()

        return group
Example #9
def consume_tokens(id_user, bucket_type, token_count):
    bucket = Bucket.query.filter_by(id_user=id_user, type=bucket_type).first()

    # A missing config key would make int() raise before the fallback could
    # apply, so read each setting with an explicit default instead.
    props = app.config['CLOUD_SESSION_PROPERTIES']
    bucket_size = int(props.get('bucket.%s.size' % bucket_type, 0))
    bucket_stream_input = int(props.get('bucket.%s.input' % bucket_type, 0))
    bucket_stream_frequency = int(props.get('bucket.%s.freq' % bucket_type, 1000))

    if bucket is None:
        bucket = Bucket()
        bucket.id_user = id_user
        bucket.type = bucket_type
        bucket.content = bucket_size
        bucket.timestamp = datetime.datetime.now()

        db.session.add(bucket)
        db.session.flush()
        db.session.refresh(bucket)

        old_bucket_content = bucket_size
    else:
        old_bucket_content = bucket.content

        elapsed_milliseconds = (datetime.datetime.now() -
                                bucket.timestamp).total_seconds() * 1000
        input_count = elapsed_milliseconds / bucket_stream_frequency
        bucket.content = int(
            min(bucket_size,
                (input_count * bucket_stream_input) + bucket.content))

    if bucket.content < token_count:
        # Tokens refill at bucket_stream_input per bucket_stream_frequency ms,
        # so divide the shortfall by the refill rate to get the wait time.
        refill_rate = max(bucket_stream_input, 1)  # guard against a zero rate
        milliseconds_till_enough = ((token_count - old_bucket_content) *
                                    bucket_stream_frequency / refill_rate)
        date_when_enough = bucket.timestamp + datetime.timedelta(
            milliseconds=milliseconds_till_enough)
        # Log and return or throw error
        return False

    bucket.content = bucket.content - token_count
    bucket.timestamp = datetime.datetime.now()
    return True
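
The refill arithmetic above is easy to sanity-check in isolation. A minimal sketch of the same formula with made-up numbers; the helper name is hypothetical:

def refill(content, elapsed_ms, size, stream_input, freq_ms):
    # Hypothetical helper mirroring the refill step above: the bucket gains
    # stream_input tokens every freq_ms milliseconds, capped at size.
    return int(min(size, content + (elapsed_ms / freq_ms) * stream_input))

# A 100-token bucket holding 40 tokens, refilling 5 tokens per second,
# holds 40 + 6 * 5 = 70 tokens after six seconds.
assert refill(40, 6000, 100, 5, 1000) == 70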
Example #10
    def add_bucket():
        try:
            eventid = request.json['event_id']
            emailsubject = request.json['email_subject']
            emailcontent = request.json['email_content']
            timestamp = request.json['timestamp']

            new_bucket = Bucket(event_id=eventid,
                                email_subject=emailsubject,
                                email_content=emailcontent,
                                timestamp=timestamp)
            new_bucket.save()

            # Shift the parsed local timestamp back eight hours to get UTC.
            process_time_utc = parse(timestamp) - timedelta(hours=8)
            print("process_time_utc: " + str(process_time_utc))

            emailrecipients = EmailAddress.query.filter_by(
                event_id=eventid).all()
            if len(emailrecipients) > 0:
                for toemail in emailrecipients:
                    toemail = str(toemail)
                    execute_email.apply_async(args=[
                        toemail,
                        eventid,
                        emailsubject,
                        emailcontent,
                    ],
                                              eta=process_time_utc)
                taskmsg = "Task added."
            else:
                taskmsg = "Email address of eventid:%s is not exist." % eventid

            print(taskmsg)
            return bucket_schema.jsonify(new_bucket), 201

        except IntegrityError as e:
            db.session.rollback()
            return jsonify({'Message': str(e)})
Example #11
    def get(self, query):

        # The handler reads the search terms from the request's query string;
        # the positional path argument is unused.
        query = self.request.get("query")
        type_search = self.request.get("type")

        num_results = 10
        tokens = G_search.split_string(query,
                                       " .,:;'\"{}[]=-_`~\n<>!?/\\#$%^&*()+")

        index_set_list = []
        num_buckets = len(list(Bucket.query()))

        if num_buckets == 0:
            self.response.write("empty index")
            return None

        for token in tokens:
            token = token.lower()
            hash_code = G_search.hash_string(token, num_buckets)
            bucket = Bucket.query(Bucket.hash_code == str(hash_code)).get()
            dictionary = bucket.dictionary
            if token in dictionary:
                index_set_list.append(set(dictionary[token]))

        if not index_set_list:
            self.response.write("keyword not found")
            return None

        links = list(set.intersection(*index_set_list))

        if type_search == "public":

            ranks = list(Ranks.query())[0].ranks

        url_ranks_look_up = dict()

        for link in links:
            url_ranks_look_up[link] = ranks[link]
        sorted_url_ranks_list = sorted(url_ranks_look_up.items(),
                                       key=operator.itemgetter(1),
                                       reverse=True)

        result = []

        for url_rank_tuple in sorted_url_ranks_list:
            result.append(url_rank_tuple[0])

        if len(sorted_url_ranks_list) > num_results:
            result = result[:num_results]

        if not result:
            self.response.write("keyword not found")
            return None

        result_tuple_lst = []
        # The snippet bucket count does not change per link; compute it once.
        num_buckets = len(list(Snippet_Bucket.query()))
        for link in result:
            hash_code = G_search.hash_string(link, num_buckets)
            bucket = Snippet_Bucket.query(
                Snippet_Bucket.hash_code == str(hash_code)).get()
            dictionary = bucket.dictionary
            t = (link, dictionary[link][0], dictionary[link][1])

            result_tuple_lst.append(t)

        self.render("result.html", results=result_tuple_lst)
Example #12
def bucket_count():
    # Sum headcounts across all shards to get the global total.
    total = 0
    for bucket in Bucket.query():
        total += bucket.headcount
    return total
Example #13
# Read Compte rows
comptes = session.query(Compte)
for compte in comptes:
    print(compte.Nom)

# Update (compte is the last row from the loop above)
compte.Nom = "Nouveau nom"
session.commit()

# Delete Compte
#session.delete(compte)
#session.commit()

# Create a Bucket
bucket = Bucket(Nom="Super Bucket",
                Taille="Trop grand",
                Type="En plastique",
                compteId=4)
session.add(bucket)
session.commit()

# Read Bucket rows
buckets = session.query(Bucket)
for bucket in buckets:
    print(bucket.Nom)

# Update (bucket is the last row from the loop above)
bucket.Nom = "Nouveau nom"
bucket.Taille = "Trop petit"
bucket.Type = "En fer"
bucket.compteId = 4
session.commit()
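
A hypothetical model declaration matching the calls above; the field names are taken from usage, everything else is an assumption:

from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Compte(Base):
    __tablename__ = 'compte'
    id = Column(Integer, primary_key=True)
    Nom = Column(String)

class Bucket(Base):
    __tablename__ = 'bucket'
    id = Column(Integer, primary_key=True)
    Nom = Column(String)
    Taille = Column(String)
    Type = Column(String)
    compteId = Column(Integer, ForeignKey('compte.id'))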
Example #14
    def post(self, id):
        u = User.query.filter_by(id=id).first()
        if u.id != g.user.id:
            return {'status':'error',
                    'description':'Unauthorized'}, 401

        if request.json:
            params = request.json
        elif request.form:
            params = {}
            for key in request.form:
                params[key] = request.form[key]
        else:
            return {'status':'error','description':'Request Failed'}, 400

        # Replace blank values with None (null) and reject protected keys
        for key in params:
            params[key] = None if params[key] == "" else params[key]

            if key in ['id', 'user_id', 'reg_dt', 'language']:
                return {'error': key + ' cannot be entered manually.'}, 401

        # Bucket Title required
        if not 'title' in params:
            return {'error':'Bucket title required'}, 401

        # Check ParentID is Valid & set level based on ParentID
        if not 'parent_id' in params or params['parent_id'] == None:
            level = 0
        else:
            b = Bucket.query.filter_by(id=params['parent_id']).first()
            if b is None:
                return {'error':'Invalid ParentID'}, 401
            elif b.user_id != g.user.id:
                return {'error':'Cannot make sub_bucket with other user\'s Bucket'}, 401
            else:
                level = int(b.level) + 1

        if 'rpt_cndt' in params:
            dayOfWeek = datetime.date.today().weekday()
            # .get avoids a KeyError when rpt_cndt is sent without rpt_type
            if params.get('rpt_type') == 'WKRP':
                if params['rpt_cndt'][dayOfWeek] == '1':
                    p = Plan(date=datetime.date.today().strftime("%Y%m%d"),
                             user_id=g.user.id,
                             bucket_id=None,
                             status=0,
                             lst_mod_dt=datetime.datetime.now())
                    db.session.add(p)

        if 'photo' in request.files:
            upload_type = 'photo'

            if len(request.files[upload_type].filename) > 64:
                return {'status':'error','description':'Filename is too long (max 64 bytes including extension)'}, 403
            upload_files = UploadSet('photos', IMAGES)
            configure_uploads(app, upload_files)

            filename = upload_files.save(request.files[upload_type])
            # The extension is everything after the last dot.
            extension = filename.rsplit('.', 1)[-1]

            f = File(filename=filename, user_id=g.user.id, extension=extension, type=upload_type)
            db.session.add(f)
            db.session.flush()
            db.session.refresh(f)


        bkt = Bucket(title=params['title'],
                     user_id=g.user.id,
                     level=str(level),
                     status= params['status'] if 'status' in params else True,
                     private=params['private'] if 'private' in params else False,
                     reg_dt=datetime.datetime.now(),
                     lst_mod_dt=datetime.datetime.now(),
                     deadline=datetime.datetime.strptime(params['deadline'],'%Y-%m-%d').date() if 'deadline' in params \
                                                                                      else datetime.datetime.now(),
                     description=params['description'] if 'description' in params else None,
                     parent_id=params['parent_id'] if 'parent_id' in params else None,
                     scope=params['scope'] if 'scope' in params else None,
                     range=params['range'] if 'range' in params else None,
                     rpt_type=params['rpt_type'] if 'rpt_type' in params else None,
                     rpt_cndt=params['rpt_cndt'] if 'rpt_cndt' in params else None,
                     cvr_img_id=f.id if 'photo' in request.files else None)
                     # cvr_img_id=f.id if 'cvr_img' in params and params['cvr_img'] == 'true' else None)
        db.session.add(bkt)
        db.session.flush()
        db.session.refresh(bkt)

        if 'rpt_cndt' in params:
            if params.get('rpt_type') == 'WKRP' and params['rpt_cndt'][dayOfWeek] == '1':
                p.bucket_id = bkt.id

        if 'fb_share' in params:
            social_user = UserSocial.query.filter_by(user_id=u.id).first()
            graph = facebook.GraphAPI(social_user.access_token)
            resp = graph.put_object("me","feed",
                         message= g.user.username + " Posted " + params['title'].encode('utf-8'),
                         link="http://masunghoon.iptime.org:5001",
                         picture=photos.url(File.query.filter_by(id=bkt.cvr_img_id).first().name) if 'photo' in request.files else None,
                         caption="Dream Proj.",
                         description=None if bkt.description is None else bkt.description.encode('utf-8'),
                         name=bkt.title.encode('utf-8'),
                         privacy={'value':params['fb_share'].encode('utf-8')})

            bkt.fb_feed_id = resp['id']

        db.session.commit()

        data={
            'id': bkt.id,
            'user_id': bkt.user_id,
            'title': bkt.title,
            'description': bkt.description,
            'level': bkt.level,
            'status': bkt.status,
            'private': bkt.private,
            'parent_id': bkt.parent_id,
            'reg_dt': bkt.reg_dt.strftime("%Y-%m-%d %H:%M:%S"),
            'deadline': bkt.deadline.strftime("%Y-%m-%d"),
            'scope': bkt.scope,
            'range': bkt.range,
            'rpt_type': bkt.rpt_type,
            'rpt_cndt': bkt.rpt_cndt,
            'lst_mod_dt': None if bkt.lst_mod_dt is None else bkt.lst_mod_dt.strftime("%Y-%m-%d %H:%M:%S"),
            'cvr_img_url': None if bkt.cvr_img_id is None else photos.url(File.query.filter_by(id=bkt.cvr_img_id).first().name),
            'fb_feed_id':None if bkt.fb_feed_id is None else bkt.fb_feed_id
        }

        return {'status':'success',
                'description':'Bucket posted successfully.',
                'data':data}, 201
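
A hypothetical client call exercising this endpoint; the URL, port, and route are assumptions, while the field names come from the handler above:

import requests

resp = requests.post(
    "http://localhost:5001/api/users/42/buckets",  # hypothetical route
    json={"title": "Read 12 books",
          "deadline": "2016-12-31",
          "private": True,
          "description": "one per month"})
print(resp.status_code, resp.json()["data"]["id"])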