Example #1
 def post(self):
     try:
         # Get the url parameter from the request
         long_url = request.args.get('url')
         # Get the optional time parameter (may be None)
         time = request.args.get('time')
         # Check that the url has a truthy value
         validate(long_url)
         # Generate a random short string
         short_url = random_generator()
         # Create a new object from the Url model
         url = Url()
         # If time is provided, convert the string to a datetime object
         if time is not None:
             url.pub_date = datetime.strptime(time, '%Y-%m-%d %H:%M')
         url.long_url = long_url
         url.short_url = short_url
         db.session.add(url)
         db.session.commit()
         # If nothing failed, return the short url to the user as JSON
         return {'short_url': short_url}
     except ValidationError:
         return {'error': 'an error has occurred'}
     except Exception:
         return {'error': 'invalid time format'}
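The validate() and random_generator() helpers referenced in Example #1 are not shown; a minimal sketch of what they might look like, with the names taken from the example and the bodies assumed:

import random
import string

class ValidationError(Exception):
    # Stand-in for the ValidationError the example catches (assumed; the real one may come from a library)
    pass

def validate(long_url):
    # Assumed behavior: reject a missing or empty url parameter
    if not long_url:
        raise ValidationError('url is required')

def random_generator(length=6):
    # Assumed implementation: random short code built from letters and digits
    return ''.join(random.choices(string.ascii_letters + string.digits, k=length))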
Example #2
File: app.py Project: jwyx3/practices
 def post(self):
     args = parser.parse_args()
     if args.get('custom_alias'):
         url_hash = args['custom_alias']
     else:
         url_hash = base62_encode(get_url_id())
         _LOG.info(f'use url_hash: {url_hash}')
     try:
         Url.create(
             id=url_hash, url=args['url'], expired_at=args.get('expired_at'),
             created_at=datetime.utcnow()
         ).if_not_exists()
     except LWTException:
         abort(409, description=f'/{url_hash} is already used')
     return {'short_url': url_for('geturl', url_hash=url_hash, _external=True)}, 201
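The base62_encode() helper used above is not shown; a plausible sketch, assuming get_url_id() hands back a monotonically increasing integer:

import string

# 62 characters: 0-9, a-z, A-Z
ALPHABET = string.digits + string.ascii_lowercase + string.ascii_uppercase

def base62_encode(number):
    # Assumed implementation: convert a non-negative integer into a base-62 string
    if number == 0:
        return ALPHABET[0]
    digits = []
    while number > 0:
        number, remainder = divmod(number, 62)
        digits.append(ALPHABET[remainder])
    return ''.join(reversed(digits))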
Example #3
    def post(self):
        json_data = request.get_json(force=True)
        if not json_data:
            return {'message': 'No input data provided'}, 400

        # Validate and deserialize input
        data, errors = url_schema.load(json_data)
        if errors:
            return errors, 422

        print(data)

        # Strip the scheme and a leading subdomain, then rebuild host/path
        url = data['url'].split('//')
        url = url[1:] if len(url) == 2 else url[:]
        url = url[0].split('/')
        url[0] = url[0].split('.')
        url[0] = ".".join(url[0][1:] if len(url[0]) == 3 else url[0][:])
        url = "/".join(url)

        existing = Url.query.filter_by(url=url).first()
        if not existing:
            # Derive the next id from the newest row (or start at 1) and hash it
            last = Url.query.order_by(Url.id.desc()).first()
            last_id = last.id if last else 0
            short_url = host + hashids.encode(last_id + 1)

            data = Url(url=url, short_url=short_url)
            db.session.add(data)
            db.session.commit()
        else:
            return {'message': 'Short url already exists'}, 400

        return {'status': 'success'}, 201
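The module-level host and hashids objects used in Example #3 are not defined in the snippet; a plausible setup, assuming the hashids package and a local base URL:

from hashids import Hashids

# Assumed configuration for the resource above
host = 'http://localhost:5000/'
hashids = Hashids(salt='change-me', min_length=4)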
Example #4
def create_new_url():
    """Creates a new shorter url and adds to db"""

    original_url = request.form.get('original_url')

    # Check that the submitted url is valid
    if not validators.url(original_url):
        return render_template('not_valid_url.html')

    custom_url = request.form.get('custom_url')

    row = Url.query.filter_by(original_url=original_url).first()

    if row:
        flash("This URL is already in our system. This is the shortened URL:  ")
        return render_template('result.html', shortened_url=row.shortened_url)

    if custom_url:
        if custom_url[:2] == "0x":
            flash("Please choose another custom url.")
            return redirect('/')
        custom_row = Url.query.filter_by(shortened_url=custom_url).first()

        if custom_row:
            flash("We're sorry, that custom shortened url is already taken")
            return redirect('/')
        else:
            new_shortened = Url(original_url=original_url, shortened_url=custom_url)
            db.session.add(new_shortened)
            db.session.commit()
            return render_template('result.html', shortened_url=custom_url)

    new_shortened = Url(original_url=original_url)
    db.session.add(new_shortened)
    db.session.commit()

    new_shortened.shortened_url = hex(int(new_shortened.id))
    db.session.commit()

    return render_template('result.html', shortened_url=new_shortened.shortened_url)
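Example #4 derives the generated short code from the row id with hex(), whose output always starts with "0x"; that is why custom aliases beginning with "0x" are rejected above, since they could collide with generated codes. A quick illustration:

hex(1)    # '0x1'
hex(255)  # '0xff'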
Example #5
  def get_short_url(self, long_url):
    ## if short url already exists for given long url, return existing
    found, url = self.url_repository.find_by_long_url(long_url)

    if found:
      return url
    
    ## if not exists, generate, check if exists, store and return
    short_url = None
    found = True
    while found:
      ##TODO: unlikely that it will be needed, but probably want some forced exit after n attempts 
      short_url = random_url(self.url_length)
      found, url = self.url_repository.find_by_short_url(short_url)

    url = Url(long_url, short_url)
    self.url_repository.add_url(url)
    return url
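random_url() in Example #5 is assumed to return a random token of the requested length; a minimal sketch under that assumption:

import secrets
import string

def random_url(length):
    # Assumed implementation: random token of letters and digits
    alphabet = string.ascii_letters + string.digits
    return ''.join(secrets.choice(alphabet) for _ in range(length))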
Example #6
def create():
    for i in data['result']['results']:
        id = i['_id']
        name = i['stitle']
        transport = i['info']
        category = i['CAT2']
        longitude = i['longitude']
        address = i['address']
        latitude = i['latitude']
        describe = i['xbody']
        mrt = i['MRT']
        images = image_site_split(i['file'])
        spt = TravelSpot(id, name, transport, category, longitude, latitude,
                         address, describe, mrt)
        db.session.add(spt)
        for image in images:
            url = Url(id, image)
            db.session.add(url)
        db.session.commit()


#TravelSpot(name,transport,category,longitude,latitude,address,describe,mrt)
#Url()
Example #7
 def make_obj(self, data, **kwargs):
     data['short_url'] = generate_short_url()
     return Url(**data)
Example #8
 def test_url_repr(self):
     u = Url('1234-5678', 'http://python.org')
     v = eval(u.__repr__())
     assert u.url_text == v.url_text
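For eval(u.__repr__()) in Example #8 to rebuild an equivalent object, Url.__repr__ must return a valid constructor call. A sketch of a Url class compatible with this test; url_text comes from the test itself, while the name of the first constructor argument is assumed:

class Url:
    def __init__(self, url_id, url_text):
        self.url_id = url_id
        self.url_text = url_text

    def __repr__(self):
        # Produce a string that eval() can turn back into an equivalent Url
        return "Url('{}', '{}')".format(self.url_id, self.url_text)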
Example #9
File: app.py Project: jwyx3/practices
 def get(self, url_hash):
     try:
         url_obj = Url.get(id=url_hash)
         return {}, 301, {'Location': url_obj['url']}
     except DoesNotExist:
         abort(404, description=f'/{url_hash} is not found')
Example #10
File: urlParser.py Project: doobeh/scudbot
def process(result):
    if result is None:
        return "No regexp match"

    link = NoneAsEmpty(result[0])
    # If there is no link, there is nothing to process
    if link is None:
        return "No link in message"

    ltype = NoneAsEmpty(result[1])
    uname = NoneAsEmpty(result[2])
    domain = NoneAsEmpty(result[3])
    ptld = NoneAsEmpty(result[4])
    ip = NoneAsEmpty(result[5])
    #Try and convert the port to an int
    try:
        port = int(NoneAsEmpty(result[6]))
    except (ValueError, TypeError):
        port = None
    pathres = NoneAsEmpty(result[7])

    # Validate the TLD, including whether it actually exists
    variable = TLD()
    istld = variable.Validate(ptld)
    if not istld:
        # The TLD isn't valid
        return "Invalid TLD %s" % ptld

    #Is there a www at any point
    valwww = link.find('www') > -1

    # Determine whether the domain looks like xx.yy, where yy is the TLD
    valdomain = re.search(r"\w+\.\w+$", domain)

    # Try to guess the mime type of the link (text/plain etc.)
    gftype = mimetypes.guess_type(link)[0]

    # Determine whether an ip is present
    isip = ip is not None

    # If there's no ltype (ftp://, http://, etc.)
    if ltype is None:
        # No TLD, ip, port or www, but there is a mime type: treat it as a file
        if not istld and port is None and not isip and not valwww and gftype is not None:
            ltype = "File"
        elif not isip and not valwww and pathres is None and port is not None and istld and uname is not None:
            # Looks like an email address
            ltype = "Email"
        elif (isip or (valdomain and istld)) and not valwww:
            # Direct connection, try it out
            ltype = 'dcon'
        # No ltype, ip, port or www value
        elif not isip and port is None and not valwww:
            # Unknown format
            ltype = "Unknown"

    if (((valdomain and istld) or isip) and (ltype == 'http' or ltype == 'https' or (ltype is None and valwww) or ltype == 'dcon')):
        # Look at using http://www.crummy.com/software/BeautifulSoup/
        try:
            # Save the original url for later use
            tlink = link

            # If the url has no http:// or https:// scheme, prepend https://
            if link.find('http://') == -1 and link.find('https://') == -1:
                link = 'https://' + link

            sys.stdout.write("Querying: %s\n" % link)
            page = urllib2.urlopen(link)
            content_type = None

            # Get the content-type header
            for header in page.info().headers:
                if header[:header.find(':')].lower() == 'content-type':
                    content_type = header[header.find(':')+1:].strip()

            if content_type is None:
                #If there is no type it's malformed
                return "No Content Type"

            if(re.search("text|(?:application.*xml)",content_type,re.I)):
                #Read the page and get the title
                pagedata = page.read()
                title = re.search("(?<=<title>).*(?=</title>)",pagedata.translate(None,"\t\n\r"), re.IGNORECASE)

                #Get the title
                dtitle = title.group(0)

                #Strip non valid characters and white spaces greater than 2 long
                dtitle = re.sub('[^\[\]\w\s,._\-\/\\}{&;]+', '', dtitle)
                dtitle = re.sub('/\s{2,}', '', dtitle);

                #If the title still has content use it, otherwise, use the url
                title = dtitle if len(dtitle) != 0 else turl;
                url = Url(link)
                url.title = title
                url.page_type = content_type
                url.link_type = ltype
                url.file_type = gftype
                url.img_cached = None
                url.img_thumb = None
            elif(re.search("image\/",content_type, re.I)):
                type_index = content_type.rfind('/')
                ftype = content_type[type_index+1:]
                genfname = genFilename()
                fthumb = genfname+".png"
                fcached = genfname+'.'+ftype

                dst = settings.THUMB_DIR+fthumb
                src = settings.IMAGE_DIR+fcached
                while(os.path.exists(src)):
                    genfname = genFilename()
                    fthumb = genfname+".png"
                    fcached = genfname+'.'+ftype
                    dst = settings.THUMB_DIR+fthumb
                    src = settings.IMAGE_DIR+fcached

                # Open our local file for writing in binary mode
                local_file = open(src, "wb")
                # Write the downloaded image data to the local file
                local_file.write(page.read())
                local_file.close()

                img = Image.open(src)
                img.thumbnail((128, 102), Image.ANTIALIAS)
                img.save(dst)

                url = Url(link)
                url.title = None
                url.page_type = content_type
                url.link_type = ltype
                url.file_type = gftype
                url.img_cached = fcached
                url.img_thumb = fthumb
                return url
        except Exception as exc:
            #Deal with HTTPError as per http://www.voidspace.org.uk/python/articles/urllib2.shtml#handling-exceptions
            print exc
    return "None\n"