Example #1
def deprocess_images_fullTest(inputs, targets, nbTargets):
    targets = helpers.deprocess(targets)
    with tf.name_scope("transform_images"):
        targetShape = targets.get_shape()
        targets_reshaped = concat_tensor_display(targets,
                                                 axisToConcat=2,
                                                 axisToSplit=1)
        tensorOneInput, SurfaceLightFixedView, HemishpereLightFixedView = concatSplitInputs(
            inputs, axisToConcat=2,
            axisToSplit=1)  #HemishpereLightHemisphereView

        tensorOneInput = tf.concat([tensorOneInput, targets_reshaped], axis=2)
        SurfaceLightFixedView = tf.concat(
            [SurfaceLightFixedView, targets_reshaped], axis=2)
        HemishpereLightFixedView = tf.concat(
            [HemishpereLightFixedView, targets_reshaped], axis=2)
        #HemishpereLightHemisphereView  = tf.concat([HemishpereLightHemisphereView, targets_reshaped], axis = 2)

    with tf.name_scope("convert_images"):
        tensorOneInput = helpers.convert(tensorOneInput)
        SurfaceLightFixedView = helpers.convert(SurfaceLightFixedView)
        HemishpereLightFixedView = helpers.convert(HemishpereLightFixedView)
        #HemishpereLightHemisphereView = helpers.convert(HemishpereLightHemisphereView)

    return tensorOneInput, SurfaceLightFixedView, HemishpereLightFixedView  #, HemishpereLightHemisphereView
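The `helpers.convert` used in Examples #1 and #14 is not shown; given the `deprocess` / `convert` naming common to pix2pix-style TensorFlow pipelines, it most likely casts float image tensors in [0, 1] to uint8 for saving and summaries. A minimal sketch under that assumption (hypothetical, not the project's actual helper):

import tensorflow as tf

def convert(image):
    # Assumed behavior: saturating cast from floats in [0.0, 1.0]
    # to uint8 in [0, 255], suitable for tf.summary.image / PNG encoding.
    return tf.image.convert_image_dtype(image, dtype=tf.uint8, saturate=True)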
Example #2
	def post(self):
		user = users.get_current_user()
		acc = entities.Account.get_by_id(user.user_id())
		inputName = helpers.convert(self.request.get('name'))
		inputMatric = helpers.convert(self.request.get('matric'))
		inputMobile = helpers.convert(self.request.get('contact'))
		if inputName:
			acc.name = inputName
		if inputMatric:
			acc.matric = inputMatric
		if inputMobile:
			acc.mobile = inputMobile
		acc.put()
		self.redirect('/')
Example #3
	def post(self):
		inputDate = helpers.convert(self.request.get('date')) #DD/MM/YYYY
		inputStart = helpers.convert(self.request.get('start'))
		inputSpaces = helpers.convert(self.request.get('spaces'))
		year = int(inputDate[6:])
		month = int(inputDate[3:5])
		day = int(inputDate[0:2])
		hour = int(inputStart[0:2])
		minute = int(inputStart[2:])
		newSlot = Slot(
			startTime= datetime.datetime(year, month, day, hour, minute),
			totalSpaces= int(inputSpaces))
		newSlot.put()
		self.redirect('/admin_sessions')
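The handler above parses "DD/MM/YYYY" and "HHMM" by slicing fixed positions. An equivalent, slightly more defensive sketch using strptime (an alternative, not the original code; parse_slot_start is a hypothetical name):

import datetime

def parse_slot_start(input_date, input_start):
    # "31/12/2024" + "0930" -> datetime.datetime(2024, 12, 31, 9, 30)
    return datetime.datetime.strptime(input_date + input_start, "%d/%m/%Y%H%M")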
Example #4
def export_data():
    filepath = os.path.dirname(
        os.path.realpath(__file__))[:-4] + "static/data/"
    print(filepath)
    data = request.json["data"]
    if not data:
        return "No file"
    data = convert(data)
    filename, contentType = export_network_data(data, filepath)
    if data["format"] == "csv":
        m = MultipartEncoder(
            fields={
                'field1': (filename[0], open(filename[0], 'rb'), 'text/csv'),
                'field2': (filename[1], open(filename[1], 'rb'), 'text/csv')
            })
        return Response(m.to_string(), mimetype=m.content_type)
    if data["format"] == "pkl":
        m = MultipartEncoder(
            fields={
                'field1': (filename[0], open(filename[0], 'rb'), 'text'),
                'field2': (filename[1], open(filename[1], 'rb'), 'text')
            })
        return Response(m.to_string(), mimetype=m.content_type)
    return send_file(filepath + filename,
                     mimetype=contentType,
                     attachment_filename=filename,
                     as_attachment=True)
Example #5
def register():
    try:
        username = request.form["username"]
        password = request.form["password"]

        db = sqlite3.connect(
            dir + "lame.db")  # dir + "blog.db") # connects to sqlite table
        c = db.cursor()
        c.execute("SELECT username FROM users")

        pre_existing_usernames = convert(list(c))

        if [username] in pre_existing_usernames:
            return auth_error(is_user_conflict=True)  # username already exists
        else:
            user_id = uuid4()  # generate new uuid for user
            c.execute(
                "INSERT INTO users (user_id, username, password) VALUES (?, ?, ?)",
                (
                    str(user_id),
                    username,
                    password,
                ))
            session['username'] = str(username)
            session['user_id'] = user_id
            db.commit()
            return user_page()
        return root()  # unreachable: both branches above return first
    except:
        return random_error()
Example #6
def jinni_search(query):
    logging.info(u'Doing a normal search for "{0}"'.format(query))
    
    # File "/usr/lib/python2.6/urllib.py", line 1269, in urlencode
    #  v = quote_plus(str(v))
    # UnicodeEncodeError: 'ascii' codec can't encode character u'\xe9' in position 1: ordinal not in range(128)
    #
    # See: http://mail.python.org/pipermail/baypiggies/2007-April/002102.html
    url = "http://www.jinni.com/discovery.html?{0}".format(urllib.urlencode({
        "query": query.encode("utf-8")
    }))
    
    request = urllib2.Request(url)
    response = open_url(request)
    content = response.read()
    document = lxml.html.soupparser.fromstring(content)
    
    # Find the script tag that contains the search results and parse it
    try:
        script_text = [script.text for script in document.xpath('//script[not(@src)]') 
            if "obj_collageEntry" in script.text][0]
        # PyNarcissus doesn't handle unicode properly:
        # 
        # File "jsparser.py", line 197, in __init__
        #   self.source = str(s)
        # UnicodeEncodeError: 'ascii' codec can't encode characters in position 31704-31706: ordinal not in range(128)
        # 
        # So encoding to UTF-8 first
        js_tree = parse_js(script_text.encode("utf-8"))
        results = convert(js_tree).values()
    except IndexError, ex:
        # No search results available
        results = []
Example #7
    def new_frame(self, frame):

        self.RGB_img = frame
        self.ABC_img = convert(frame,
                               src_model='rgb',
                               dest_model=self.color_model)
        self.dims = self.RGB_img.shape

        self.hogA, self.hogA_img = self.hog(self.ABC_img[:, :, 0])
Example #8
def scrapper(config, data, sleep=30):
    """
    Scrape data from some source
    """
    while True:
        for metric in config['metrics']:
            response = {
                'Datapoints': [
                    {
                        "Timestamp": datetime.utcnow(),
                        metric['aws_statistics'][0]:
                        random.randint(1, 420) / 100.0,
                        "Unit": "Percent"
                    },
                ]
            }

            dp = response['Datapoints']
            d = dp[-1]

            #  Example, create this line:
            #    aws_ebs_volume_read_bytes_maximum{volume_id="vol-035faf9767706322e"}
            #  from this config:
            #    aws_namespace: AWS/EBS
            #    aws_metric_name: VolumeReadBytes
            #    aws_dimensions: [VolumeId]
            #    aws_dimension_select:
            #      VolumeId: [vol-035faf9767706322e]
            #    aws_statistics: [Maximum]
            line = '{ns}_{n}_{s}{{{u}}}'.format(
                ns=convert(metric['aws_namespace']),
                n=convert(metric['aws_metric_name']),
                s=convert(metric['aws_statistics'][0]),
                u=get_dimensions_str(metric['aws_dimension_select']))
            data[line] = d[metric['aws_statistics'][0]]

        # sleep for "sleep" seconds (30 by default)
        time.sleep(sleep)
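This `convert` is imported from elsewhere, but the comment block pins down its behavior: it must turn `AWS/EBS` into `aws_ebs` and `VolumeReadBytes` into `volume_read_bytes` to produce the Prometheus line shown. A CamelCase-to-snake_case sketch consistent with that output (the project's actual helper may differ):

import re

def convert(name):
    # "AWS/EBS" -> "aws_ebs"; "VolumeReadBytes" -> "volume_read_bytes"
    name = name.replace('/', '_')
    name = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
    name = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', name)
    return name.lower()

The same helper would serve Example #25, which builds identical metric lines from real CloudWatch responses.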
Example #9
def convert_menu(request):
    if request.method == 'POST':
        form = UploadForm(request.POST, request.FILES)
        if form.is_valid():
            # print(form.cleaned_data)
            # form.save()
            global uploaded_file_name
            uploaded_file_name = str(request.FILES['file_name'])
            up_file = UploadedFile(up_file=request.FILES['file_name'])
            up_file.save()

            convert(uploaded_file_name)

            #delete uploaded files after converting
            shutil.rmtree(base + '/media')

        else:
            # return
            print("FORM is NOT VALID**********")

    else:
        form = UploadForm()

    return render(request, 'index.html', locals())
Example #11
def run():  # Command line user view
    # Parse arguments
    parser = argparse.ArgumentParser(description='Process parameters.')
    parser.add_argument('input_path',
                        metavar='I',
                        type=str,
                        help='filepath for input JSON')
    parser.add_argument('--markdown_format',
                        metavar='M',
                        type=str,
                        choices=helpers.supported_formats,
                        help='markdown format for output: ' +
                        ", ".join([str(f) for f in helpers.supported_formats]))
    # argparse's type=bool would treat any non-empty string (even "False")
    # as True, so these boolean options are plain store_true flags instead.
    parser.add_argument('--overwrite',
                        action='store_true',
                        help='whether to overwrite existing output files')
    parser.add_argument('--filter',
                        action='store_true',
                        help='whether to filter which entities to convert')
    args = parser.parse_args()

    # Set config
    config.input_path = args.input_path
    if args.markdown_format:
        config.md_format = args.markdown_format
    if args.overwrite:
        config.overwrite_all = args.overwrite

    if args.filter:
        config.types_filtered = get_types_filtered()
    else:
        config.types_filtered = config.record_types.values(
        )  # default supported types

    helpers.setup_dir()  # Create output folder

    # Convert files
    print('==========')
    completion_data = helpers.convert(
        config.input_path, config.md_format,
        config.types_filtered)  # track total vs. success
    print("All done! Of {0} matched entities, {1} successfully converted.".
          format(*completion_data))
Example #12
def detector(image,
             image_size,
             darknet_model,
             darknet_meta,
             darknet_image,
             darknet_size,
             log=False):

    # Load darknet image.
    image_resized = cv2.resize(image,
                               darknet_size,
                               interpolation=cv2.INTER_LINEAR)
    darknet.copy_image_from_bytes(darknet_image, image_resized.tobytes())

    # Detect.
    results = darknet.detect_image(darknet_model,
                                   darknet_meta,
                                   darknet_image,
                                   thresh=0.25)

    for result in results:
        class_id, confidence, bounding_box = result
        class_id = class_id.decode('utf-8')
        if class_id == 'person':
            if log:
                print(f'{class_id}: {confidence}')
            # Convert from YOLO format.
            bounding_box = hp.convert(bounding_box)

            # Rescaling the bounding boxes.
            bounding_box = hp.rescale(image_size, darknet_size, bounding_box)
            start_point = (bounding_box[0], bounding_box[1])
            end_point = (bounding_box[2], bounding_box[3])

            # Add indicators.
            cv2.rectangle(image, start_point, end_point, (0, 255, 0), 1)
            cv2.putText(image, f'{class_id}: {confidence}',
                        (bounding_box[0], bounding_box[1] - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, .2, (0, 255, 0), 1)

    result = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    return result
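`hp.convert` is not defined in this snippet, but its output is indexed as `[x1, y1, x2, y2]` corner coordinates, while darknet's `detect_image` returns center-format `(cx, cy, w, h)` boxes. A hedged sketch of such a conversion (assumed behavior, not the project's actual helper):

def convert(bounding_box):
    # darknet center format (cx, cy, w, h) -> corner format (x1, y1, x2, y2)
    cx, cy, w, h = bounding_box
    return (int(round(cx - w / 2)), int(round(cy - h / 2)),
            int(round(cx + w / 2)), int(round(cy + h / 2)))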
Example #13
    def _isValid(self, queryParams=None):
        if self._authenticated and not self._token:
            return "User not authenticated"

        for arg in self._body_params:
            arg_name = arg.get('name')
            arg_type = arg.get('type')
            arg_required = arg.get('required')
            param = self._payload.get(arg_name)

            if arg_required and param is None:
                return "Parameter {0} is required".format(arg_name)

            if not arg_required and param is not None:
                if param.__class__.__name__ != arg_type:
                    return "Parameter {0} should be {1}".format(
                        arg_name, arg_type)

        if queryParams:
            for arg in self._query_params:
                arg_name = arg.get('name')
                arg_type = arg.get('type')
                arg_required = arg.get('required')

                if arg_required and arg_name not in queryParams:
                    return "Query parameter {0} is required".format(arg_name)
                elif arg_name in queryParams:
                    value = queryParams.get(arg_name)

                    # is the declared type an array, e.g. "int[]"?
                    if "[]" in arg_type.replace(' ', ''):
                        main_type = arg_type.replace(' ', '').replace('[]', '')
                        values = value.split(',')

                        for v in values:
                            if main_type != "str":
                                v = helpers.convert(v)

                            if v.__class__.__name__.lower() != main_type:
                                return "Query parameter {0} is not {1}".format(
                                    arg_name, arg_type)
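Here `helpers.convert` coerces a query-string fragment into a typed value so its class name can be compared with the declared `arg_type`. A plausible sketch of such a coercion helper (the real implementation is not shown):

import ast

def convert(value):
    # Best effort: "1" -> int, "1.5" -> float, "True" -> bool;
    # anything that does not parse as a Python literal stays a str.
    try:
        return ast.literal_eval(value)
    except (ValueError, SyntaxError):
        return value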
Example #14
def deprocess_images(inputs, targets, nbTargets):
    #inputs = helpers.deprocess(inputs)
    targets = helpers.deprocess(targets)

    with tf.name_scope("transform_images"):
        targetShape = targets.get_shape()
        targets_reshaped = concat_tensor_display(targets,
                                                 axisToConcat=2,
                                                 axisToSplit=1)
        inputs_reshaped = tf.reshape(inputs, [
            -1,
            int(inputs.get_shape()[2]),
            int(inputs.get_shape()[3]),
            int(inputs.get_shape()[4])
        ])

        tensorToSave = tf.concat([inputs_reshaped, targets_reshaped], axis=2)
    with tf.name_scope("convert_images"):
        tensorToSave = helpers.convert(tensorToSave)

    return tensorToSave
Example #15
def jinni_findSuggestionsWithFilters(query):
    logging.info(u'Doing a suggestion search for "{0}"...'.format(query))
    
    url = "http://www.jinni.com/dwr/call/plaincall/AjaxController.findSuggestionsWithFilters.dwr"
    values = {
        # Both the httpSessionId and scriptSessionId need to be submitted
        # or the server will respond with a "HTTP Error 501: Not Implemented".
        # However, they are not validated.
        # FIXME: when logged in for some reason you do need to send along a valid httpSessionId
        "httpSessionId": [cookie.value for cookie in cj if cookie.name == "JSESSIONID"][0],
        "scriptSessionId": "", # i.e. 3C675DDBB02222BE8CB51E2415259E99878
        "callCount": "1",
        "page": "/discovery.html",
        "c0-scriptName": "AjaxController",
        "c0-methodName": "findSuggestionsWithFilters",
        "c0-id": "0",
        "c0-param0": "string:{0}".format(query.encode("utf-8")),
        "c0-e1": "null:null",
        "c0-e2": "boolean:false",
        "c0-e3": "boolean:false",
        "c0-e4": "boolean:false",
        "c0-e5": "Array:[]",
        "c0-param1": "Object_Object:{contentTypeFilter:reference:c0-e1, onlineContentFilter:reference:c0-e2, dvdContentFilter:reference:c0-e3, theaterContentFilter:reference:c0-e4, contentAffiliates:reference:c0-e5}",
        "batchId": "2"
    }
    
    data = urllib.urlencode(values)
    request = urllib2.Request(url, data)
    response = open_url(request)
    content = response.read()
    
    js_tree = parse_js(content)
    tree = convert(js_tree)
    evaluate(js_tree, tree)
    
    results = tree["s1"]
    
    return results
Example #16
    def __renderInputs(self, materials, renderingScene, jitterLightPos,
                       jitterViewPos, mixMaterials, isTest, renderSize):
        mixedMaterial = materials
        if mixMaterials:
            alpha = tf.random_uniform([1],
                                      minval=0.1,
                                      maxval=0.9,
                                      dtype=tf.float32,
                                      name="mixAlpha")
            #print("mat2: " + str(materials2))

            materials1 = materials[::2]
            materials2 = materials[1::2]

            mixedMaterial = helpers.mixMaterials(materials1, materials2, alpha)
        mixedMaterial.set_shape(
            [None, self.nbTargetsToRead, renderSize, renderSize, 3])
        mixedMaterial = helpers.adaptRougness(mixedMaterial)
        #The 3 commented lines below try to scale the albedos to get more variety and to randomly flatten the normals to disambiguate the normals and albedos. We did not see a strong effect from these.
        #if not isTest and self.useAugmentationInRenderings:
        #    mixedMaterial = helpers.adaptAlbedos(mixedMaterial, self.batchSize)
        #    mixedMaterial = helpers.adaptNormals(mixedMaterial, self.batchSize)

        reshaped_targets_batch = helpers.target_reshape(
            mixedMaterial
        )  #reshape it to be compatible with the rendering algorithm [?, size, size, 12]
        nbRenderings = self.maxInputToRead
        if not self.fixImageNb:
            #If we don't want a constant number of input images, we randomly select a number of input images between 1 and the maximum number of images defined by the user.
            nbRenderings = tf.random_uniform([1],
                                             1,
                                             self.maxInputToRead + 1,
                                             dtype=tf.int32)[0]
        rendererInstance = renderer.GGXRenderer(includeDiffuse=True)
        ## Do renderings of the mixedMaterial

        targetstoRender = reshaped_targets_batch
        pixelsToAdd = 0

        targetstoRender = helpers.preprocess(
            targetstoRender)  #Put targets to -1; 1
        surfaceArray = helpers.generateSurfaceArray(
            renderSize, pixelsToAdd
        )  #Generate a grid Y,X between -1;1 to act as the pixel support of the rendering (compute the direction vector between each pixel and the light/view)

        #Do the renderings
        inputs = helpers.generateInputRenderings(
            rendererInstance,
            targetstoRender,
            self.batchSize,
            nbRenderings,
            surfaceArray,
            renderingScene,
            jitterLightPos,
            jitterViewPos,
            self.useAmbientLight,
            useAugmentationInRenderings=self.useAugmentationInRenderings)
        #inputs = [helpers.preprocess(input) for input in inputs]

        randomTopLeftCrop = tf.zeros([self.batchSize, nbRenderings, 2],
                                     dtype=tf.int32)
        averageCrop = 0.0

        #If we want to jitter the renderings around (to try to account for small non-alignment), we should handle the material crop a bit differently
        #We didn't really manage to get satisfying results with the jittering of the renderings, but the code could be useful if this is of interest to Ansys.
        if self.jitterRenderings:
            randomTopLeftCrop = tf.random_normal(
                [self.batchSize, nbRenderings, 2], 0.0,
                1.0)  #renderSize - self.cropSize, dtype=tf.int32)
            randomTopLeftCrop = randomTopLeftCrop * tf.exp(
                tf.random_normal(
                    [self.batchSize], 0.0,
                    1.0))  #renderSize - self.cropSize, dtype=tf.int32)
            randomTopLeftCrop = randomTopLeftCrop - tf.reduce_mean(
                randomTopLeftCrop, axis=1, keep_dims=True)
            randomTopLeftCrop = tf.round(randomTopLeftCrop)
            randomTopLeftCrop = tf.cast(randomTopLeftCrop, dtype=tf.int32)
            averageCrop = tf.cast(self.maxJitteringPixels * 0.5,
                                  dtype=tf.int32)
            randomTopLeftCrop = randomTopLeftCrop + averageCrop
            randomTopLeftCrop = tf.clip_by_value(randomTopLeftCrop, 0,
                                                 self.maxJitteringPixels)

        totalCropSize = self.cropSize

        inputs, targets = helpers.cutSidesOut(inputs, targetstoRender,
                                              randomTopLeftCrop, totalCropSize,
                                              self.firstAsGuide, averageCrop)
        print("inputs shape after" + str(inputs.get_shape()))

        self.gammaCorrectedInputsBatch = inputs
        tf.summary.image("GammadInputs",
                         helpers.convert(inputs[0, :]),
                         max_outputs=5)
        inputs = tf.pow(inputs, 2.2)  # correct gamma
        if self.logInput:
            inputs = helpers.logTensor(inputs)

        inputs = helpers.preprocess(inputs)
        targets = helpers.target_deshape(targets, self.nbTargetsToRead)
        return targets, inputs
Example #17
 def convert(self):
     currentMeasureValue = helpers.getUnit(self.unit)
     futureMeasureValue = helpers.getUnit(self.conversionUnit)
     self.targetMeasure = helpers.convert(self.magnitude,
                                          currentMeasureValue,
                                          futureMeasureValue)
Example #18
File: app.py Project: mosobhy/CS50-Finance
def index():
    """Show portfolio of stocks"""
    # defining the lists.
    unique_symbol = []
    unique_shares = []
    total = 0

    # query the database for the symbol to look up for stock data of that particular symbol.
    rows = db.execute("SELECT cash FROM users WHERE id = :ID", ID = session["user_id"])

    # iterate over the returned list of dictionaries of the rows query.
    cashed = []     # this will be passed to the template ==> cash.
    for i in rows:
        if i["cash"] not in cashed:
            cashed.append(usd(i["cash"]))


    user_exist = db.execute("SELECT symbol FROM history WHERE id = :ID", ID = session["user_id"])

    # render a special template for non-existing users (who haven't made any purchases yet)
    if not user_exist:
        return render_template("index2.html", cash = rows[0]["cash"])

    # iterating over the user_exist list of dicts.
    for i in user_exist:
        # check if the current symbol not in the list, so append it.
        if i["symbol"] not in unique_symbol:
            unique_symbol.append(i["symbol"])

    # iterating over the unique_symbol to seek the shares for each particular symbol.
    for i in unique_symbol:
        # query the database to retrieve the shares for that symbol.
        shares = db.execute("SELECT shares FROM history WHERE id = :ID AND symbol = :symbol",
                                                            ID = session["user_id"], symbol = i)
        # iterate over the list containing the shares dictionary.
        for j in shares:
            total += j["shares"]
        unique_shares.append(total)
        total = 0

    # convert the two lists into a dictionary using the convert function.
    symbols_shares = convert(unique_symbol, unique_shares)  # pass to the template ==> symbol, shares.

    # iterate over the symbol to lookup them in the API.
    looked_stocks = []      # pass to the template ==> price, name.
    for i in symbols_shares:
        # watch out that lookup returns a dictionary.
        stocks = lookup(i)
        if i not in looked_stocks:
            looked_stocks.append(stocks)

    # insert the cash and shares into the looked_stocks list of dicts.
    for i in looked_stocks:
        # iterate over the symbols and shares.
        for key, val in symbols_shares.items():
            # note: setting i["price"] = usd(i["price"]) here would be an error because usd() returns the number as a string.
            if i["symbol"] == key:
                # insert the shares and total.
                i["shares"] = val
                i["total"] = i["price"] * i["shares"]
                # formatting the money.
                i["total"] = usd(i["total"])
                i["price"] = usd(i["price"])

    return render_template("index.html", looked_stocks = looked_stocks, cashed = cashed)
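The inline comment describes this `convert` as merging the two parallel lists into one dictionary, i.e. something equivalent to the sketch below (assumed; the project's helper is not shown):

def convert(keys, values):
    # convert(["AAPL", "MSFT"], [3, 7]) -> {"AAPL": 3, "MSFT": 7}
    return dict(zip(keys, values))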
Example #19
def user_page():
    try:
        # ADVICE SLIP API
        adv_data = urllib.request.urlopen("https://api.adviceslip.com/advice")
        adv_readable = adv_data.read()
        adv_d = json.loads(adv_readable)
        slip = adv_d["slip"]
        advice = slip["advice"]

        # PUBLIC HOLIDAY API
        API_KEY0 = open("keys/key_api0.txt", "r").read()
        curr_time = strftime("%m:%d:%y", localtime())
        month = curr_time[:2]
        day = curr_time[3:5]
        year = "20" + curr_time[6:]

        holi_data = urllib.request.urlopen(
            "https://holidays.abstractapi.com/v1/?api_key=" + API_KEY0 +
            "&country=US&year=" + year + "&month=" + month + "&day=" + day)
        holi_readable = holi_data.read()
        holi_d = json.loads(holi_readable)
        if (len(holi_d) >= 1):
            days = holi_d[0]
            holiday = days["name"]
        else:
            holiday = "No Holiday(s) Today"

        ## WEATHER API
        API_KEY1 = open("keys/key_api1.txt", "r").read()

        weather = urllib.request.urlopen(
            "https://api.openweathermap.org/data/2.5/weather?q=New%20York&appid="
            + API_KEY1).read()
        weather = json.loads(weather.decode('utf-8'))
        weather_dict = {}
        weather_dict['main'] = weather['weather'][0]['main']
        weather_dict['temp'] = int(9.0 / 5.0 *
                                   (weather['main']['temp'] - 273) + 32)

        # DOG PHOTO API
        u = urllib.request.urlopen("https://dog.ceo/api/breeds/image/random")
        response = u.read()
        data = json.loads(response)
        pic = data['message']

        # RETRIEVE USER NOTE
        db = sqlite3.connect(
            dir + "lame.db")  # dir + "blog.db") # connects to sqlite table
        c = db.cursor()
        c.execute("SELECT content FROM user_note WHERE user_id=?",
                  (str(session.get("user_id")), ))
        prev_content = tup_clean(c)  # returns list of each element from cursor

        if len(
                prev_content
        ) > 0:  # if the user already had a note saved from a previous session
            note = a_remove(
                prev_content[0]
            )  # clean all apostrophes from the content the user uploaded
        else:
            note = "Write anything here, and click the Save button below to save your work for the future!"

        # RETRIEVE TODO LIST
        c.execute(
            "SELECT title, body, item_id FROM todo WHERE user_id=? ORDER BY date_time",
            (str(session.get("user_id")), ))
        todo_tuple = list(c)
        num_items_already_in_list = len(
            todo_tuple
        )  # checks to see if user has any items in to do list preexisting

        if num_items_already_in_list > 0:
            todo_list = convert(
                todo_tuple
            )  # converts list of tuples into list of subscriptable lists
        else:
            todo_list = []

        return render_template("user_page.html",
                               greeting=get_greeting(session.get("username")),
                               adv=advice,
                               holi=holiday,
                               user_note=note,
                               picture=pic,
                               to_dos=todo_list,
                               weather=weather_dict,
                               route="/")
    except:
        return random_error()
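Per its inline comment, the `convert` in this example turns the cursor's list of tuples into subscriptable (mutable) lists, along the lines of this sketch (assumed implementation):

def convert(rows):
    # [("title", "body", 1), ...] -> [["title", "body", 1], ...]
    return [list(row) for row in rows]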
Example #20
 def test_23kmAcm(self):
     self.assertEqual(2300000, helpers.convert(23, 1000, 0.01))
Example #21
 def test_15mAkm(self):
     self.assertEqual(0.015, helpers.convert(15, 1, 1000))
Example #22
 def test_200cmAkm(self):
     self.assertEqual(0.002, helpers.convert(200, 0.01, 1000))
Example #23
 def test_100cmAm(self):
     self.assertEqual(1, helpers.convert(100, 0.01, 1))
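Examples #20 through #23 pin down the contract of this `helpers.convert` (also used by Example #17): the second and third arguments are the metre-multipliers of the source and target units, so the result is `magnitude * src_factor / dst_factor`. A sketch that satisfies all four tests:

def convert(magnitude, src_factor, dst_factor):
    # e.g. 23 km -> cm: 23 * 1000 / 0.01 == 2300000
    return magnitude * src_factor / dst_factor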
Example #24
nr_bigrams = bigrammer(TEMPFILE0_NAME, BIGRAMMED_FILE_NAME) # Can tune threshold and mincount

print('Found %d bigrams' % nr_bigrams, file=sys.stderr)

print('Fixing POS in bigrams...', file=sys.stderr)
bigram_file = smart_open(BIGRAMMED_FILE_NAME, 'r')
CONV_BIGRAM_FILE_NAME = BIGRAMMED_FILE_NAME.replace('_bigrams.txt', '_conv_bigrams.txt')
conv_bigram_file = smart_open(CONV_BIGRAM_FILE_NAME, 'a')

for line in bigram_file:
    res = line.strip().split()
    newwords = []
    for word in res:
        if ':::' in word:
            newword = convert(word)
        else:
            newword = word
        newwords.append(newword)
    conv_bigram_file.write(' '.join(newwords))
    conv_bigram_file.write('\n')
bigram_file.close()
conv_bigram_file.close()
print('Fixed bigrams written to %s...' % CONV_BIGRAM_FILE_NAME, file=sys.stderr)

print('Filtering the corpus...', file=sys.stderr)

# STOPWORDS_FILE = 'stopwords_ru'
# stopwords = set([w.strip().lower() for w in smart_open(STOPWORDS_FILE,'r').readlines()])
functional = set('ADP AUX CCONJ DET PART PRON SCONJ PUNCT'.split())
SKIP_1_WORD = True
Example #25
def scrapper(config, data, sleep=30):
    """
    Scrape metrics from AWS cloudwatch
    """
    c = boto3.client('cloudwatch', region_name=config['region'])

    while True:
        # Run a new scrape each "sleep" scoundes
        for metric in config['metrics']:
            print('INFO: Reading metric %s from aws_namespace %s [%s]' %
                  (metric['aws_metric_name'], metric['aws_namespace'],
                   config['region']))

            request_args = dict(
                Namespace=metric['aws_namespace'],
                MetricName=metric['aws_metric_name'],
                Dimensions=get_dimensions(metric['aws_dimension_select']),
                StartTime=(datetime.utcnow() -
                           timedelta(seconds=int(metric['range_seconds']))),
                EndTime=datetime.utcnow(),
                Period=60,
                Statistics=[metric['aws_statistics'][0]])

            response = c.get_metric_statistics(**request_args)
            dp = response['Datapoints']

            #  Example, create this line:
            #    aws_ebs_volume_read_bytes_maximum{volume_id="vol-035faf9767706322e"}
            #  from this config:
            #    aws_namespace: AWS/EBS
            #    aws_metric_name: VolumeReadBytes
            #    aws_dimensions: [VolumeId]
            #    aws_dimension_select:
            #      VolumeId: [vol-035faf9767706322e]
            #    aws_statistics: [Maximum]
            line = '{ns}_{n}_{s}{{{u}}}'.format(
                ns=convert(metric['aws_namespace']),
                n=convert(metric['aws_metric_name']),
                s=convert(metric['aws_statistics'][0]),
                u=get_dimensions_str(metric['aws_dimension_select']))

            if len(dp) == 0:
                print('WARN: Empty metric %s in namespace %s [%s]' %
                      (metric['aws_metric_name'], metric['aws_namespace'],
                       config['region']))

                # Clear data point
                if line in data:
                    del data[line]
            else:
                # Update data with new value
                # the last value is the newest
                d = dp[-1]
                data[line] = d[metric['aws_statistics'][0]]

                print('INFO: Metric %s in namespace %s [%s]:' %
                      (metric['aws_metric_name'], metric['aws_namespace'],
                       config['region']))

        # Wait "sleep" scounds
        time.sleep(sleep)