def get(self, key):
        """Serve the PNG image stored for the photo at urlsafe key ``key``.

        Requires an ``id_token`` query parameter; on successful auth the
        token is persisted as a cookie and the blob bytes are written back
        with an ``image/png`` content type. Unauthorized requests get a 401.
        """
        photo = ndb.Key(urlsafe=key).get()

        query = urlparse.parse_qs(urlparse.urlparse(self.request.url).query)

        # Guard clauses replace the original nested if/else; both failure
        # branches emitted the same 401 response.
        if 'id_token' not in query:
            self.response.out.write("401 No Authorization \r\n")
            self.response.set_status(401)
            return

        id_token = query['id_token'][0]
        if not User.auth_photo_user(key, id_token):
            self.response.out.write("401 No Authorization \r\n")
            self.response.set_status(401)
            return

        self.response.set_cookie('id_token',
                                 id_token,
                                 max_age=3600,
                                 path='/')

        # Bug fix: the original tested ``if blobstore.BlobReader(photo.b_key)``,
        # which is always truthy (a reader object), so the "No image" branch
        # was unreachable; it also constructed the reader twice. Test the blob
        # key itself and build the reader once.
        if photo and photo.b_key:
            blob_reader = blobstore.BlobReader(photo.b_key)
            self.response.headers['Content-Type'] = 'image/png'
            self.response.write(blob_reader.read())
        else:
            self.response.out.write("No image")
Beispiel #2
0
def prepare():
    """Populate the module-level index list and dictionary blob reader."""
    global idxlist
    global dictfd

    # Parse the index blob into the shared in-memory list.
    index_reader = blobstore.BlobReader(idxfile_key)
    idxlist = load_index(index_reader)
    index_reader.close()
    logging.info("prepare() idxlist: size = %d" % (len(idxlist)))

    # Keep a reader for the dictionary blob open at module level.
    dictfd = blobstore.BlobReader(dictfile_key)
Beispiel #3
0
    def get(self):
        """Demo: write a file to Cloud Storage, then read it back via Blobstore.

        Exercises the three BlobReader constructor forms (default, explicit
        buffer_size, explicit position) and the read()/seek()/line-iteration
        APIs, then deletes the object. The repeated BlobReader instantiations
        below are deliberate — each illustrates one constructor variant.
        """
        # Get the default Cloud Storage Bucket name and create a file name for
        # the object in Cloud Storage.
        bucket = app_identity.get_default_gcs_bucket_name()

        # Cloud Storage file names are in the format /bucket/object.
        filename = '/{}/blobreader_demo'.format(bucket)

        # Create a file in Google Cloud Storage and write something to it.
        with cloudstorage.open(filename, 'w') as filehandle:
            filehandle.write('abcde\n')

        # In order to read the contents of the file using the Blobstore API,
        # you must create a blob_key from the Cloud Storage file name.
        # Blobstore expects the filename to be in the format of:
        # /gs/bucket/object
        blobstore_filename = '/gs{}'.format(filename)
        blob_key = blobstore.create_gs_key(blobstore_filename)

        # [START gae_blobstore_reader]
        # Instantiate a BlobReader for a given Blobstore blob_key.
        blob_reader = blobstore.BlobReader(blob_key)

        # Instantiate a BlobReader for a given Blobstore blob_key, setting the
        # buffer size to 1 MB.
        blob_reader = blobstore.BlobReader(blob_key, buffer_size=1048576)

        # Instantiate a BlobReader for a given Blobstore blob_key, setting the
        # initial read position.
        blob_reader = blobstore.BlobReader(blob_key, position=0)

        # Read the entire value into memory. This may take a while depending
        # on the size of the value and the size of the read buffer, and is not
        # recommended for large values.
        blob_reader_data = blob_reader.read()

        # Write the contents to the response.
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.write(blob_reader_data)

        # Set the read position back to 0, then read and write 3 bytes.
        blob_reader.seek(0)
        blob_reader_data = blob_reader.read(3)
        self.response.write(blob_reader_data)
        self.response.write('\n')

        # Set the read position back to 0, then read and write one line (up to
        # and including a '\n' character) at a time.
        blob_reader.seek(0)
        for line in blob_reader:
            self.response.write(line)
        # [END gae_blobstore_reader]

        # Delete the file from Google Cloud Storage using the blob_key.
        blobstore.delete(blob_key)
Beispiel #4
0
    def handlePostRequest(self):
        """Dispatch an admin-utility POST action.

        Supported actions: hazard import (zip upload), ingredient import
        (file upload), and granting the admin role to a user by email. All
        actions except ACTION_ADMIN require an uploaded 'data' file; a
        missing upload short-circuits with an error message.
        """
        action = self.request.get('action', None)
        data = None
        try:
            if action != self.ACTION_ADMIN:
                data = self.request.POST['data']
        except KeyError:
            self.addMessage('You must upload a file.')
        else:
            # Hazard import: the upload is a zip archive of hazard data.
            if action == self.ACTION_HAZARD:
                key = blobstore.parse_blob_info(data)
                zfile = ZipFile(blobstore.BlobReader(key), 'r')
                reader = HazardsImporter(zfile)
                if reader.read():
                    self.addMessage('The operation was successful.',
                                    self.MSG_TYPE_SUCCESS)
                else:
                    self.addMessage('<strong>Error</strong>: %s' %
                                    reader.error)
                # The uploaded blob is consumed either way.
                key.delete()

            # Ingredient import: attributed to the current (or system) user.
            if action == self.ACTION_INGREDIENT:
                key = blobstore.parse_blob_info(data)
                user = User.load(users.get_current_user())
                if not user:
                    user = User.system_user()

                reader = IngredientsImporter(blobstore.BlobReader(key), user)
                if reader.read():
                    self.addMessage('The operation was successful.',
                                    self.MSG_TYPE_SUCCESS)
                else:
                    self.addMessage('<strong>Error</strong>: %s' %
                                    reader.error)
                key.delete()

            # Admin grant: create the user record if needed, then attach the
            # admin role.
            if action == self.ACTION_ADMIN:
                email = self.request.get('email')
                user = User.find_by_email(email)
                if not user:
                    user = User.create_by_email(email)

                admin_role = Role.get_admin_role()

                UserRole.create(user, admin_role)

                self.addMessage(
                    'The admin "%s" was successfully added.' % email,
                    self.MSG_TYPE_SUCCESS)

        self.addCommon()
        self.setActivePage('Utilities')
  def get(self, filekey, sheet_name):
      """Render a container listing parsed from an uploaded spreadsheet blob.

      Rewritten with consistent indentation — the original mixed tabs and
      spaces and would not parse. Logic preserved as reconstructed; the bare
      ``except`` clauses were narrowed/kept as best-effort skips.
      """
      # Nested defaultdict so arbitrary y[...][...] paths auto-vivify.
      dictd = lambda: defaultdict(dictd)
      y = dictd()
      wb = xlrd.open_workbook(file_contents=blobstore.BlobReader(filekey).read())
      sh = wb.sheet_by_index(int(sheet_name))
      row = 33

      # Find the column where the readings start: scan row 33 for the
      # "Dat/Sup" header cell; readings begin one column to its right.
      for a in range(0, 10):
          y["DAT"] = sh.cell_value(row, a)
          try:
              if "Dat/Sup" in y["DAT"]:
                  start_col = a + 1
          except TypeError:  # cell value is not a string
              pass
      y["start_col"] = start_col

      # Locate the first data row: three rows below the "Number" header.
      for a in range(25, 32):
          if "Number" in sh.cell_value(a, 0):
              y["start"] = a + 3

      # Container rows come in pairs (delivery-air row, then return-air row).
      for c in range(y["start"], sh.nrows, 2):
          y["container"][c] = sh.cell_value(c, 0)
          y["container"]["ppecbcode"][c] = sh.cell_value(c, 1)
          y["container"]["vent"][c] = sh.cell_value(c, 2)
          y["container"]["setpoint"][c] = sh.cell_value(c, 3)
          y["rows"] = sh.nrows
          y["filekey"] = filekey
          y["sheet_name"] = sheet_name
          for g in range(5, 10):
              try:
                  # Cells hold "AM/PM" temperature pairs; diff delivery air
                  # against return air for both halves of the day.
                  amDAtemp, pmDAtemp = sh.cell_value(c, g).split("/")
                  amRAtemp, pmRAtemp = sh.cell_value(c + 1, g).split("/")
                  AMdiff = Decimal(amDAtemp) - Decimal(amRAtemp)
                  PMdiff = Decimal(pmDAtemp) - Decimal(pmRAtemp)
                  y["AMDiff"][g] = AMdiff
                  y["PMDiff"][g] = PMdiff
                  y["AMDiff"]["class"][g] = "default"
                  y["PMDiff"]["class"][g] = "default"
                  # NOTE(review): later thresholds overwrite earlier ones, so
                  # a diff >= -0.2 ends up "lightgreen", not "darkgreen" —
                  # preserved from the original; confirm intended.
                  if AMdiff >= Decimal(-0.2):
                      y["AMDiff"]["class"][c][g] = "darkgreen"
                  if AMdiff >= Decimal(-0.5):
                      y["AMDiff"]["class"][c][g] = "lightgreen"
                  if AMdiff <= Decimal(-1.0):
                      y["AMDiff"]["class"][c][g] = "lightred"
                      y["colour"][c] = "lightred"
                  if AMdiff <= Decimal(-2.0):
                      y["AMDiff"]["class"][c][g] = "darkred"
                      y["colour"][c] = "darkred"
              except Exception:  # malformed cell: skip this reading
                  pass

      params = {
          "y": y,
      }
      return self.render_template('containerlist.html', **params)
	def get(self):
		"""Import every uploaded manifest workbook found in the blobstore.

		Rewritten with consistent (tab) indentation — the original mixed tabs
		and spaces and would not parse. Non-manifest workbooks are skipped.
		"""
		infos = blobstore.BlobInfo.all()
		fkey = infos.fetch(infos.count())
		for filekey in fkey:
			wb = xlrd.open_workbook(file_contents=blobstore.BlobReader(filekey).read())
			y = makeManifestPickle(wb)
			if (y["header"] == "manifest"):
				# Only persist a manifest we have not already stored.
				if not (models.Manifest().find_duplicate(y["vessel"], y["voyage"], y["port"])):
					man = models.Manifest()
					man.blob = filekey.key()
					man.vessel_name = y["vessel"]
					man.voyage = y["voyage"]
					man.port = y["port"]
					man.put()
					if y["vessel"]:
						search.Index(name='Manifest').put(CreateDocument(y["vessel"], y["voyage"], y["port"], str(man.key)))
					# Detail rows start at spreadsheet row 5.
					for c in range(5, y["numrows"]):
						SaveManifestDetail(man.key, y, c, man.vessel_name, man.voyage, man.port)
			else:
				y = "y"

		# NOTE(review): if no blobs exist, ``y`` is undefined here — preserved
		# from the original; confirm callers always have at least one blob.
		params = {
			"y": y,
		}
		return self.render_template("testman.html", **params)
Beispiel #7
0
    def post(self):
        """Create a new inventory Item from the submitted form fields.

        Allocates the next inventory number from a singleton Counter entity
        (starting at 100000) and stores a serving URL for the uploaded
        picture blob. Writes the allocated number back in the response.
        (An unused BlobReader construction in the original was removed.)
        """
        blob_key = self.request.get('imagekey')

        # Allocate the next inventory number from the singleton counter.
        counter_key = ndb.Key('Counter', 'chetna')
        curr_counter = counter_key.get()
        if curr_counter is None:
            curr_counter = Counter(id='chetna', counter="100000")
            inventory_number = "100000"
        else:
            inventory_number = str(int(curr_counter.counter) + 1)
            curr_counter.counter = inventory_number
        curr_counter.put()

        # Build and persist the item.
        item = Item(parent=ndb.Key('Item', 'chetna'),
                    inventory_number=inventory_number,
                    picture_url=images.get_serving_url(blob_key),
                    buying_price=self.request.get("buying_price"),
                    expected_sale_price=self.request.get("expected_sale_price"),
                    sale_price="0",
                    conversion_rate=self.request.get("conversion_rate"),
                    status="unsold",
                    category=self.request.get("category"),
                    quantity=self.request.get("quantity")
                    )
        item.put()
        self.response.out.write(inventory_number)
 def post(self):
     """Import one batch of communes from a tab-separated location blob.

     Processes up to ``max_data_access`` rows starting at the ``cursor``
     offset; when more rows remain, re-enqueues itself on the task queue
     (delayed 6 hours) with the next cursor so the import resumes later.
     """
     max_data_access = 1000
     # NOTE(review): despite the name, this request parameter holds the blob
     # *key* string, which BlobReader accepts directly.
     blob_info = self.request.get('blob_key')
     cursor = int(self.request.get('cursor'))
     blob_reader = blobstore.BlobReader(blob_info)
     i = 0
     for row in blob_reader:
         if i >= cursor and i < cursor + max_data_access:
             # Tab-separated row layout, per the unpacking below.
             pays, CP, ville, info1, info2, departement, \
             numDepartement, arrond, numArrond, latitude, longitude, precision = row.split('\t')
             if len(CP) <= 5:
                 entry = Commune(nom=ville,
                                 CP=CP,
                                 departement=numDepartement,
                                 pays=pays,
                                 coordonnees=ndb.GeoPt(latitude + ", " +
                                                       longitude))
                 # Only insert communes not already present (same name + CP).
                 queryVille = Commune.query(
                     ndb.AND(Commune.nom == entry.nom,
                             Commune.CP == entry.CP))
                 if queryVille.count() == 0:
                     entry.put()
         elif i >= cursor + max_data_access:
             # Batch limit reached: hand the remainder to a delayed task.
             next_cursor = cursor + max_data_access
             taskqueue.add(url='/admin/process_csv',
                           params={
                               'blob_key': blob_info,
                               'cursor': next_cursor
                           },
                           countdown=21600)
             break
         i += 1
Beispiel #9
0
	def get(self, keyval):
		"""Process an uploaded workbook blob identified by ``keyval``.

		Vessel data sheets get their search indexes rebuilt; manifest sheets
		are persisted (unless already stored) with their detail rows.
		Rewritten with consistent (tab) indentation — the original mixed tabs
		and spaces.
		"""
		resource = str(urllib.unquote(keyval))
		blob_info = blobstore.BlobInfo.get(resource)

		wb = xlrd.open_workbook(file_contents=blobstore.BlobReader(blob_info.key()).read())
		sheet = wb.sheet_by_index(0)
		# The first row decides the sheet type.
		data = [sheet.cell_value(0, col) for col in range(sheet.ncols)]
		if "MOL REEFER MANIFEST" not in data:
			# Vessel data sheet: rebuild its search indexes from scratch.
			iname = str(blob_info.filename.replace(" ", ""))
			delete_all_in_index(iname, "text")
			delete_all_in_index(iname, "date")
			delete_all_in_index(iname, "number")
			y = addVesselData(wb, iname)
			s = matchManifest(iname)
			updateContainerStatus(iname)
		else:
			# Manifest sheet: persist it unless an identical one exists.
			y = makeManifestPickle(wb)
			if not (models.Manifest().find_duplicate(y["vessel"], y["voyage"], y["port"])):
				man = models.Manifest()
				man.blob = blob_info.key()
				man.vessel_name = y["vessel"]
				man.voyage = y["voyage"]
				man.port = y["port"]
				man.put()
				for c in range(5, y["numrows"]):
					SaveManifestDetail(man.key, y, c, man.vessel_name, man.voyage, man.port)
			else:
				y="Manifest added already"

		params = {
			"y": y,
		}
		return self.render_template("testman.html", **params)
Beispiel #10
0
    def post(self):
        """Extract card data from a user's queued screenshot blobs.

        Loads every screenshot entity with index in [minIndex, maxIndex),
        parses the cards out of the images, stores them on each entity,
        marks the entities processed, and deletes the consumed blobs.
        (A ``blobstore.get()`` call whose result was discarded has been
        removed; it had no effect.)
        """
        min_index = int(self.request.get("minIndex"))
        max_index = int(self.request.get("maxIndex"))
        user_id = self.request.get("userID")
        query = UserScreenshot.query(UserScreenshot.index >= min_index,
                                     UserScreenshot.index < max_index,
                                     ancestor=ndb.Key('UserCollection',
                                                      user_id)).order(
                                                          UserScreenshot.index)

        results = query.fetch(None)
        images = []
        for entry in results:
            reader = blobstore.BlobReader(entry.blob_key)
            image = Image.open(reader)
            image.load()
            images.append(image)

        p = ScreenshotParser()
        cards = p.getCardsFromImages(images)
        for i, result in enumerate(results):
            # Bit of a fudge here, could get the ScreenshotParser to give us
            # how many cards per image. Assumes up to 8 cards per screenshot.
            result.cards = ""
            for j in range(i * 8, min(i * 8 + 8, len(cards))):
                result.cards += str(cards[j].toDict()) + "\n"
            result.cards = result.cards[:-1]  # Remove last newline
            blobstore.delete(result.blob_key)
            result.processed = True
            result.put()
        return
Beispiel #11
0
def getMappedByCountries(program):
    """Map each country to the names of the program's schools located there.

  Args:
    program: program entity.

  Returns:
    a dict mapping a country to the list of names of schools located in that
    country; an empty dict when the program has no schools blob.
  """
    if not program.schools:
        return {}

    memcache_key = _SCHOOL_LIST_MEMCACHE_KEY_PATTERN % {
        'program_key': program.key().name()
    }

    # Serve from memcache when a cached mapping exists.
    cached_map = memcache.get(memcache_key)
    if cached_map is not None:
        return json.loads(cached_map)

    # Otherwise build the mapping from the schools blob and cache it.
    school_map = collections.defaultdict(list)
    reader = blobstore.BlobReader(program.schools)
    for school in getSchoolsFromReader(reader):
        school_map[school.country].append(school.name)

    memcache.set(memcache_key, json.dumps(school_map))
    return school_map
Beispiel #12
0
    def post(self):
        """Shorten every URL in an uploaded CSV and stream back a results CSV.

        Bug fix: the original called ``len(list(reader))`` to enforce the row
        limit, which exhausted the csv reader iterator, so the conversion
        loop after it never processed any rows. The rows are now materialised
        once and reused for both the limit check and the conversion.
        """
        # prepare the csv file for reading
        upload_files = self.get_uploads('csv_file')
        blob_info = upload_files[0]
        blob_reader = blobstore.BlobReader(blob_info.key())
        rows = list(csv.reader(blob_reader, delimiter=','))

        # set up the csv file for download
        self.response.headers['Content-Type'] = 'text/csv'
        self.response.headers['Content-Disposition'] = 'attachment; filename=shorterned_urls.csv'
        writer = csv.writer(self.response.out)
        writer.writerow(['Original Url', 'Short Url'])

        # maximum link to convert is 500,000
        if len(rows) > MAX_URLS_TO_CONVERT:
            raise ValueError("The maximum number of links to be converted is 500,000")
            # A better way to handle this would be with flash messages or alerts, etc

        # convert links, save to DB and download CSV
        for row in rows:
            if len(row) > 1:
                raise ValueError("Input must have a single url per line")
                # better checks can be implemented
            original_url = row[0]
            shortened_url = '{}/{}'.format(
                self.request.application_url,
                Link.shorten_url(original_url))

            writer.writerow([original_url, shortened_url])
def get_reader(key):
    """Return a BlobReader for ``key``, URL-unquoting non-str keys first."""
    if isinstance(key, str):
        normalized = key
    else:
        normalized = str(urllib.unquote(key))
    return blobstore.BlobReader(normalized)
Beispiel #14
0
def Process_CSV(blob_info):
    """Import theses from an uploaded CSV blob into the datastore.

    Creates fixed University/College/Department records, then one Thesis per
    CSV row with its adviser (created on demand), member Students, and a tag
    list derived from the title, adviser and member names.

    Bug fix: the original looped ``tag.replace(...)`` and discarded the
    result (strings are immutable), so punctuation was never stripped from
    the tags; the cleaned tags are now rebuilt into ``tag_list``.
    """
    blob_reader = blobstore.BlobReader(blob_info.key())
    reader = csv.DictReader(blob_reader, delimiter=',')
    uni = University(name='Polytechnic University of the Philippines',
                     initials='PUP')
    uni.put()

    college = College(name='Engineering', university_key=uni.key)
    college.put()

    dept = Department(name='Computer Engineering', college_key=college.key)
    dept.put()

    for row in reader:
        user = users.get_current_user()
        tag_list = []
        title_list = row['Title'].lower().split(' ')
        thesis = Thesis(id=''.join(title_list))
        tag_list.extend(title_list)
        thesis.year = int(row['Year'])
        thesis.title = row['Title']
        thesis.abstract = row['Abstract']
        thesis.section = int(row['Section'])
        # Look up the adviser by normalised name; create one if missing.
        faculty = Faculty.get_by_name(row['Adviser'].lower().replace(' ', ''))
        if faculty is None:
            if len(row['Adviser']) > 0:
                faculty = Faculty(
                    id=row['Adviser'].lower().replace(' ', ''),
                    first_name=row['Adviser'].split(' ')[0].title(),
                    last_name=row['Adviser'].split(' ')[1].title())
                tag_list.extend(row['Adviser'].lower().split(' '))
            else:
                faculty = Faculty(id='Anonymous')
            faculty.put()
            thesis.adviser = faculty.key
        else:
            tag_list.extend(row['Adviser'].lower().split(' '))
            thesis.adviser = faculty.key
        member_list = [
            row['Member 1'], row['Member 2'], row['Member 3'], row['Member 4'],
            row['Member 5']
        ]
        member_keys = []
        for member in member_list:
            if member != '':
                tag_list.extend(member.lower().split(' '))
                student = Student(name=member)
                student.put()
                member_keys.append(student.key)
        # Strip punctuation from every tag (see docstring: the original
        # discarded these results).
        tag_list = [
            tag.replace('.', '').replace(',', '').replace(':', '').replace(';', '')
            for tag in tag_list
        ]
        thesis.members = member_keys
        dept = Department.get_department(row['Department'], row['College'],
                                         row['University'])
        thesis.tags = tag_list
        thesis.department = dept.key
        thesis.created_by = user.nickname()
        thesis.put()
Beispiel #15
0
def serve_work(request, photo_key):
    """Serve the photo blob identified by ``photo_key`` as an HTTP response."""
    blob = blobstore.get(photo_key)
    if not blob:
        raise Http404
    content = blobstore.BlobReader(blob.key()).read()
    return HttpResponse(content, content_type=blob.content_type)
Beispiel #16
0
 def post(self):
     """Create a Song from uploaded mp3/ogg/page blobs (MEI is optional)."""
     check_login(self)
     mp3_list = self.get_uploads('mp3')
     ogg_list = self.get_uploads('ogg')
     mei_list = self.get_uploads('mei')
     page_list = self.get_uploads('page')
     # mp3, ogg and at least one page image are all required.
     if not (mp3_list and ogg_list and page_list):
         self.redirect("/?empty=1")
         return
     page_keys = [page.key() for page in page_list]
     if mei_list:
         mei_key = mei_list[0].key()
         json_data = parse_mei(blobstore.BlobReader(mei_key).read())
     else:
         # No MEI upload: start from an empty per-page data structure.
         mei_key = None
         json_data = make_empty_data(len(page_keys))
     song = Song(
         mp3=mp3_list[0].key(),
         ogg=ogg_list[0].key(),
         mei=mei_key,
         json=json.dumps(json_data),
         page_list=page_keys,
     )
     song.put()
     self.redirect("/box_edit/{0}".format(song.key().id()))
Beispiel #17
0
    def post(self, key):
        """Start a reference import for root ``key`` from an uploaded blob.

        Parses the import header synchronously; on success the remaining
        data is handed off to a task-queue GET task that resumes reading at
        the current blob offset.
        """

        upload = self.get_uploads()[0]

        ref_import = RefImport(blob_key=str(upload.key()),
                               root=key,
                               state="processing",
                               all_count=upload.size)
        ref_import.put()

        reader = blobstore.BlobReader(upload.key())

        try:
            self.read_header(ref_import, reader)
            # Record how many bytes the header consumed.
            ref_import.incr(reader.tell())
        except ParseImportHeaderError:
            ref_import.state = "error"

        ref_import.put()

        if ref_import.state != "error":
            # The task resumes reading the blob at the post-header offset.
            taskqueue.add(url="/service/ref/import/task/%s/%d" %
                          (str(ref_import.key()), reader.tell()),
                          method='GET')

        # NOTE(review): error(301) is issued even on the success path, and
        # 301 is a redirect status — confirm this is intentional.
        self.error(301)
Beispiel #18
0
    def get(self):
        """Load yesterday's 'Everything' histogram file from Bigstore.

    Verifies the file exists in Cloud Storage, then reads it through the
    Blobstore API with a large (~3.5 MB) read buffer. Only runs against
    production (settings.PROD).
    """
        yesterday = datetime.date.today() - datetime.timedelta(1)
        yesterday_formatted = yesterday.strftime("%Y.%m.%d")

        filename = 'histograms/daily/%s/Everything' % (yesterday_formatted)

        if settings.PROD:
            try:
                # Existence probe only; the handle is closed immediately.
                with cloudstorage.open(BIGSTORE_BUCKET + filename,
                                       'r') as unused_f:
                    pass
            except cloudstorage.errors.Error, e:
                logging.error(e)
                self.response.write(e)
                return

            # The file exists; serve it.
            blob_key = blobstore.create_gs_key('/gs' + BIGSTORE_BUCKET +
                                               filename)
            blob_reader = blobstore.BlobReader(blob_key, buffer_size=3510000)
            try:
                result = blob_reader.read()
            finally:
                blob_reader.close()
            # NOTE(review): ``result`` is read but never written to the
            # response — the sample may be truncated here; confirm.
Beispiel #19
0
	def get(self, keyval):
		"""Import the manifest workbook blob ``keyval``, adding vessel data too.

		Rewritten with consistent (tab) indentation — the original mixed tabs
		and spaces and would not parse.
		"""
		resource = str(urllib.unquote(keyval))
		blob_info = blobstore.BlobInfo.get(resource)

		wb = xlrd.open_workbook(file_contents=blobstore.BlobReader(blob_info.key()).read())
		v = addVesselData(wb, blob_info.filename.replace(" ", ""))
		y = makeManifestPickle(wb)
		if (y["header"] == "manifest"):
			# Only persist a manifest we have not already stored.
			if not (models.Manifest().find_duplicate(y["vessel"], y["voyage"], y["port"])):
				man = models.Manifest()
				man.blob = blob_info.key()
				man.vessel_name = y["vessel"]
				man.voyage = y["voyage"]
				man.port = y["port"]
				man.put()
				# Detail rows start at spreadsheet row 5.
				for c in range(5, y["numrows"]):
					SaveManifestDetail(man.key, y, c, man.vessel_name, man.voyage, man.port)
			else:
				y="Manifest added already"

		params = {
			"y": y,
		}
		return self.render_template("testman.html", **params)
Beispiel #20
0
 def generate_attachments_zip(self, project):
     """Return zipped bytes of every attachment belonging to ``project``."""
     buffer = StringIO.StringIO()
     with zipfile.ZipFile(buffer, u'w', zipfile.ZIP_DEFLATED) as archive:
         for attachment in dao.get_attachments_by_project(project):
             payload = blobstore.BlobReader(attachment.blob_key).read()
             archive.writestr(attachment.filename, payload)
     return buffer.getvalue()
Beispiel #21
0
 def get(self):
     """Dump every blob's raw bytes and size, separated by 20 line breaks."""
     for info in blobstore.BlobInfo.all():
         payload = blobstore.BlobReader(info.key()).read()
         self.response.out.write(str(payload) + str(info.size))
         for _ in range(0, 20):
             self.response.out.write('<br>')
Beispiel #22
0
    def post(self):
        """Import an Evernote notebook blob into a Springpad workbook.

        Parses the uploaded notebook and creates one Springpad item per
        entry via an OAuth-authenticated mutator; the blob is deleted after
        processing (and best-effort on failure too).
        """
        workbook = self.request.get("workbook")
        token =  self.request.get("token")
        secret =  self.request.get("secret")
        username = self.request.get("username")
        uuid = self.request.get("uuid")
        blobkey = self.request.get("content")
        try:
            service = springpad.SpringRpcService()
            service.fetcher = springpad.OAuthFetcher(APPLICATION_KEY, APPLICATION_SECRET,access_token=oauth.OAuthToken(token,secret))
            service.set_user_context(username, uuid)
            mutator = service.get_mutator()

            blob_reader = blobstore.BlobReader(blobkey)
            parser = evernotebookparser.NotebookParser2(blob_reader)
            # Create one item per parsed notebook entry.
            parser.get_items(lambda x: createItem(mutator,workbook,x))
            logging.info("Serviced a customer")
            blob_reader.close()
            blobstore.delete(blobkey)
        except Exception,e:
            logging.exception("-------%s"%e.message)
            # Best-effort cleanup: the blob may already be gone.
            try:
                blobstore.delete(blobkey)
            except :
                pass
Beispiel #23
0
    def post(self):
        """Parse an uploaded log file into Log entities, then delete the blob.

        Each matching line yields a Log(ip, datetime, page) saved with
        put_async(); non-matching lines are skipped silently.
        """
        upload_files = self.get_uploads(
            'file')  # 'file' is file upload field in the form
        if len(upload_files) == 1:
            blob_info = upload_files[0]
            blob_reader = blobstore.BlobReader(blob_info)

            # Expected line shape: "<date> <time> [INFO] root: [<ip>] <page>"
            pattern = re.compile(
                r"""
                (?P<datetime>[\d-]+\s[\d:,]+)\s          # gets the datetime
                \[INFO\]\s root:\s                             # eats the info and root
                \[(?P<ip>[\d.]+)\]\s                          # gets the ip
                (?P<page>[\w]+)                             # gets the page       
                """, re.VERBOSE)

            for line in blob_reader:
                match = pattern.match(line)
                if match:
                    groups = match.groupdict()
                    dt = datetime.strptime(groups['datetime'],
                                           '%Y-%m-%d %H:%M:%S,%f')
                    log = Log(ip=groups['ip'],
                              datetime=dt,
                              page=groups['page'])
                    log.put_async()

            # The blob has been fully consumed.
            blob_info.delete()

        self.redirect('/')
def write_csv_row_objects(csv_file_obj_key):
    """Persist one CSVRow entity per data row of the stored CSV file.

    Each saved row also schedules a deferred ``analyse_row`` task, staggered
    by row number so the tasks do not all start at once. On a malformed
    header the file is flagged (``header_present = False``) and processing
    stops.

    Bug fix: the original logged the exception *class*
    (``logging.exception(HeaderException)``) instead of the caught instance.
    """
    csv_file_obj = CSVFile.get(csv_file_obj_key)
    event = csv_file_obj.event
    blob_fd = blobstore.BlobReader(csv_file_obj.blob)
    blob_fd.seek(0)
    encoding = csv_file_obj.encoding
    try:
        for row_num, row in read_csv(event, blob_fd, encoding):
            csv_row_obj = CSVRow(
                csv_file=csv_file_obj.key(),
                num=row_num,
                row=[db.Text(cell) for cell in row])

            csv_row_obj.save()

            # analyse row in background, staggered by row number
            deferred.defer(
                analyse_row,
                csv_file_obj.key(),
                csv_row_obj.key(),
                _countdown=int(0.2 * int(row_num))  # delay start
            )
    except HeaderException as e:
        logging.exception(e)
        csv_file_obj.header_present = False
        csv_file_obj.save()
Beispiel #25
0
    def upload_creative_asset(self,
                              asset_type,
                              filename,
                              asset_key,
                              advertiser_id,
                              retry_count=0):
        """Upload a blobstore asset as a DCM creative asset.

        Retries up to MAX_RETRIES times on HTTP 403/500/503; any other HTTP
        error (or an exhausted retry budget) propagates to the caller.
        """
        try:
            asset_body = {
                'assetIdentifier': {
                    'name': filename,
                    'type': asset_type
                }
            }

            blob_file = blobstore.BlobReader(asset_key)
            content_type = blobstore.BlobInfo(asset_key).content_type
            upload = http.MediaIoBaseUpload(blob_file,
                                            mimetype=content_type,
                                            resumable=False)

            request = self.service.creativeAssets().insert(
                advertiserId=advertiser_id,
                profileId=self.profile_id,
                media_body=upload,
                body=asset_body)
            return request.execute()
        except http.HttpError as e:
            retryable = e.resp.status in (403, 500, 503)
            if retryable and retry_count < self.MAX_RETRIES:
                return self.upload_creative_asset(asset_type, filename,
                                                  asset_key, advertiser_id,
                                                  retry_count + 1)
            raise
Beispiel #26
0
 def post(self):
     """Accept a form file upload, store it as a blob and acknowledge it.

     Rejects requests with a missing name or file, and uploads over 80 MB.
     (Stray Py2 debug ``print`` statements — one duplicated — and two unused
     locals from the original were removed.)
     """
     auname = self.request.get('pname')
     if len(auname) == 0:
         self.redirect('/')
         return
     aufile = self.request.get('file')
     if len(aufile) == 0:
         self.redirect('/')
         return
     timestampnow = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     # Upload the file into a blob
     upload_files = self.get_uploads('file')  # 'file' is file upload field in the form
     if len(upload_files) == 0:
         return
     blob_info = upload_files[0]
     if blob_info.size > 20971520 * 4:  # 80 MB limit
         self.response.out.write('<br> File Rejected. ERROR: FILE SIZE IS ABOVE LIMIT (80 MB). Please Resubmit <BR>')
         return
     self.response.out.write(' <br> File Received </h1> Received from: %s  at UTC %s  <br>' % (auname, timestampnow))
     self.response.out.write(' File received was: %s type: %s <br>' % (blob_info.filename, blob_info.content_type))
Beispiel #27
0
  def post(self):
    """Unpack an uploaded Twitter archive zip into the account's raw data."""
    upload = ndb.Key(urlsafe=self.request.get("key")).get()
    account = upload.account_key.get()
    archive_reader = blobstore.BlobReader(upload.blob_key, buffer_size=524288)
    logging.info("Unpacking")

    logging.info("parsing archive files")
    raw = account.raw_uploaded_data()
    raw.data = []
    with zipfile.ZipFile(archive_reader, 'r') as archive:
      for member in archive.namelist():
        if member.startswith("data/js/tweets") and member.endswith(".js"):
          payload = archive.read(member)
          # Each tweet file opens with a JS assignment; keep only the JSON
          # array that follows the first '['.
          payload = payload[payload.find("["):]
          raw.data += parse_raw_twitter(payload)
    raw.put()

    account.update_status(model.STATUS_FILE_UNPACKED)
    taskqueue.add(queue_name='default',
                  url='/tasks/parse',
                  params={'key': raw.key.urlsafe(), 'clean_urls': 'true', 'send_email': 'True'})
    logging.info("Unpacked from zip to datastore")
    memcache.delete("unpack_%s" % account.key.urlsafe())
Beispiel #28
0
    def get(self):
        """Return JSON describing all of the user's images and earth models.

        Bug fix: when the ``all`` argument is absent, the original fell
        through to ``json.dumps(data)`` with ``data`` undefined, raising a
        NameError after setting the 502 status; it now returns immediately.
        """
        user = self.verify()
        if "all" not in self.request.arguments():
            self.error(502)
            return

        models = get_all_items_user(ImageModel, user)

        data = [{
            # Distinct colours of each image, rendered as strings.
            "colours": [
                RGBToString(j[1])
                for j in Image.open(blobstore.BlobReader(
                    i.image.key())).convert('RGB').getcolors()
            ],
            "image":
            images.get_serving_url(i.image),
            "key":
            str(i.key()),
            "earth_models": [
                j.json for j in EarthModel.all().ancestor(i).filter(
                    "user =", user.user_id if user else None).fetch(1000)
            ]
        } for i in models]

        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(data))
Beispiel #29
0
    def post(self):
        """Parse a TCX workout blob into per-trackpoint task-queue entries.

        Deduplicates on (author, blob_key) via EntryIndex, then walks the
        XML: for each trackpoint with a heart rate it accumulates total
        distance and climbed altitude and enqueues one task per point.
        """
        author = str(self.request.get('author'))
        blob_key = str(self.request.get('blob_key'))
        # Skip blobs this author has already processed.
        q = EntryIndex.all()
        q.filter("author =", users.User(author))
        q.filter("blob_key =", blob_key)
        if q.count() > 0:
            return

        def txn():
            entryindex = EntryIndex()
            entryindex.author = users.User(author)
            entryindex.blob_key = blob_key
            entryindex.put()

        db.run_in_transaction(txn)

        if not blobstore.get(blob_key):
            return
        else:
            blob_reader = blobstore.BlobReader(blob_key)
            data = blob_reader.read()
            t = xml.etree.ElementTree.fromstring(data)
            pre_lat, pre_long, pre_time, pre_alt, pre_heart = None, None, None, None, None
            t_dist = 0  # accumulated distance
            t_alt = 0  # accumulated altitude gain/loss (changes >= 1.2m only)
            rec = 0  # trackpoint counter
            # Tag names are namespaced, hence the find(...) > 0 suffix tests.
            for e in t.getiterator():
                if e.tag.find("Time") > 0:
                    time = e.text
                if e.tag.find("LatitudeDegrees") > 0:
                    lat = float(e.text)
                if e.tag.find("LongitudeDegrees") > 0:
                    long = float(e.text)
                if e.tag.find("AltitudeMeters") > 0:
                    alt = float(e.text)
                if e.tag.find("HeartRateBpm") > 0:
                    # HeartRateBpm closes a trackpoint: emit it.
                    heart = int(e[0].text)
                    if pre_lat != None:
                        delta = distance([lat, long], [pre_lat, pre_long])
                        if fabs(alt - pre_alt) >= 1.2:
                            t_alt = t_alt + fabs(alt - pre_alt)
                        t_dist += delta
                        taskqueue.add(url='/queue1',
                                      params={
                                          'author': author,
                                          'blob_key': blob_key,
                                          'lat': lat,
                                          'long': long,
                                          'rec': rec,
                                          'heart': heart,
                                          'alt': alt,
                                          't_alt': t_alt,
                                          'dist': delta,
                                          't_dist': t_dist,
                                          'time': time
                                      },
                                      queue_name='queue1')
                    # NOTE(review): assigns ``pre_hr`` although ``pre_heart``
                    # was initialised above — likely a typo, but harmless
                    # since neither is read afterwards; ``pre_time`` is also
                    # never read.
                    pre_lat, pre_long, pre_time, pre_alt, pre_hr = lat, long, time, alt, heart
                    rec = rec + 1
Beispiel #30
0
    def get(self):
        """Render the management view for a single crash report."""
        report_id = self.request.get('id', None)
        report = ReportModel.get_by_id(int(report_id))

        reproSteps = None
        reporterVersion = None
        if report.userProps:
            # User-supplied properties are stored as a plist blob.
            meta_bytes = blobstore.BlobReader(report.userProps).read()
            meta = plistlib.readPlistFromString(meta_bytes)
            reproSteps = meta.get('ReproSteps', None)
            reporterVersion = meta.get('ReporterVersion', None)

        header = "Report %u (%s)" % (
            int(report_id), report.date.strftime('%Y-%m-%d %H:%M:%S'))
        template_values = {
            'id': report_id,
            'header': header,
            'repro': reproSteps,
            'reporterVersion': reporterVersion,
            'comment': report.comment,
        }

        path = os.path.join(os.path.dirname(__file__), 'templates',
                            'manage_view.html')
        self.response.out.write(template.render(path, template_values))