Example #1
    async def download(self, request):

        if request.username:
            database_name = hashlib.sha256(
                request.username.encode("utf-8")).hexdigest()
        else:
            raise exceptions.Unauthorized("A valid token is needed")

        data = await request.post()
        hash = data["hash"]

        file_path = storage.get_file(hash)
        dotfile_path = storage.get_file("." + hash)
        if not os.path.exists(file_path) or not os.path.exists(dotfile_path):
            raise exceptions.NotFound("file <{}> does not exist".format(hash))
        with open(dotfile_path) as dotfile:
            dotfile_content = json.load(dotfile)[database_name]
            name = dotfile_content["name"]

        response = web.StreamResponse()
        response.headers["Content-Type"] = "application/octet-stream"
        response.headers[
            "Content-Disposition"] = "attachment; filename*=UTF-8''{}".format(
                urllib.parse.quote(name, safe="")  # replace with the filename
            )
        response.enable_chunked_encoding()
        await response.prepare(request)

        with open(file_path, "rb") as fd:  # replace with the path
            for chunk in iter(lambda: fd.read(1024), b""):
                await response.write(chunk)
        await response.write_eof()

        return response
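A minimal client sketch for exercising this handler, assuming it is routed at POST /download and that auth middleware fills request.username from an "Authorization" header (neither is shown in the snippet above):

# Hypothetical aiohttp client for the download handler above; the /download
# route and the "Authorization" header name are assumptions.
import asyncio
import aiohttp


async def fetch(file_hash, token, out_path):
    async with aiohttp.ClientSession(headers={"Authorization": token}) as session:
        async with session.post("http://localhost:8080/download",
                                data={"hash": file_hash}) as resp:
            resp.raise_for_status()
            with open(out_path, "wb") as out:
                # the handler streams a chunked body, so stream it to disk as well
                async for chunk in resp.content.iter_chunked(1024):
                    out.write(chunk)

# asyncio.run(fetch("617Y7DY73y2", "auth", "background.jpg"))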
Example #2
def saved_resource_page():
    id_token = request.cookies.get("token")
    claims = None
    error_message = None
    try:
        claims = google.oauth2.id_token.verify_firebase_token(
            id_token, firebase_request_adapter)
    except ValueError as exc:
        error_message = str(exc)
    if claims is None:
        # Token verification failed; bail out instead of dereferencing None below.
        return error_message or "Token verification failed", 401

    email = claims['email']

    resource_name = request.args['name']
    description, input_type, output_type, input_language, output_language = database.getAResource(
        email, resource_name)

    input_resource = storage.get_file(email, resource_name, "input")
    output_resource = storage.get_file(email, resource_name, "output")

    if input_type == "text": input_resource.decode('utf-8')
    if output_type == "text": output_resource.decode('utf-8')

    print(description, input_type, output_type, input_resource,
          output_resource)

    return render_template('savedResource.html',
                           resource_name=resource_name,
                           input_type=input_type,
                           output_type=output_type,
                           input_language=input_language,
                           output_language=output_language,
                           input_resource=input_resource,
                           output_resource=output_resource)
Example #3
    async def delete(self, request):

        if request.username:
            database_name = hashlib.sha256(
                request.username.encode("utf-8")).hexdigest()
        else:
            raise exceptions.Unauthorized("A valid token is needed")

        data = await request.post()
        hash = data["hash"]

        file_path = storage.get_file(hash)
        dotfile_path = storage.get_file("." + hash)
        if not os.path.exists(file_path) or not os.path.exists(dotfile_path):
            raise exceptions.NotFound("file <{}> does not exist".format(hash))
        with open(dotfile_path) as dotfile:
            dotfile_content = json.load(dotfile)
            user_dotfile_content = dotfile_content[database_name]
            spaces = user_dotfile_content["spaces"]
        storage.atto.Database(database_name).remove_data((hash, spaces))
        if len(dotfile_content) == 1:
            os.remove(file_path)
            os.remove(dotfile_path)
        else:
            del dotfile_content[database_name]
            with open(dotfile_path, "w") as dotfile:
                json.dump(dotfile_content, dotfile)
        return web.json_response({"deleted": True})
Example #4
    def parse(self):
        """read in a file and return a MOPFile object."""
        self.filename = storage.get_file(self.expnum,
                                         self.ccd,
                                         ext=self.extension,
                                         version=self.type,
                                         prefix=self.prefix)
        self.fobj = open(self.filename, 'r')
        lines = self.fobj.read().split('\n')
        self.header = HeaderParser(self.extension).parser(lines)
        if 'matt' in self.extension:
            usecols=[0,1,2,3,4]
            data = numpy.genfromtxt(self.filename, usecols=usecols)
        else:
            data = numpy.genfromtxt(self.filename)
        self.data = Table(data, names=self.header.column_names[0:data.shape[1]])
        ast_header = storage.get_astheader(self.expnum, self.ccd)
        self.wcs = wcs.WCS(ast_header)
        # range() objects cannot be appended to in Python 3, so build a list first
        flip_these_extensions = list(range(1, 19))
        flip_these_extensions.append(37)
        flip_these_extensions.append(38)
        if self.ccd + 1 in flip_these_extensions:
            self.data['X'] = float(self.header.keywords['NAX1'][0])-self.data['X'] + 1
            self.data['Y'] = float(self.header.keywords['NAX2'][0])-self.data['Y'] + 1
        ra, dec = self.wcs.xy2sky(self.data['X'], self.data['Y'], usepv=True)

        self.data.add_columns([Column(ra, name='RA_J2000'), Column(dec, name='DE_J2000')])
        return self
Example #5
def handle_read(packet, wfile):
    fp = storage.get_file(packet.filepath[1:])
    if fp is None:
        return ERR_NOENT

    bdata = fp.read(packet.offset, packet.length)

    Header(RESP_READ, len(bdata)).to_stream(wfile)
    wfile.write(bdata)
Example #6
def handle_stat(packet, wfile):
    #fp = os.path.join(config.DATA_DIR, packet.filepath[1:])
    #sr = do_stat(fp)
    fp = storage.get_file(packet.filepath[1:])
    if fp is None:
        return ERR_NOENT

    sr = do_stat(fp)
    ser = sr.SerializeToString()
    Header(RESP_STAT, len(ser)).to_stream(wfile)
    wfile.write(ser)
Example #7
def handle_listdir(packet, wfile):
    dp = os.path.join(config.DATA_DIR, packet.dirpath[1:])
    print(dp)

    lr = RespListdir()
    with database.connect() as c:
        rows = c.execute('SELECT * FROM Files').fetchall()
        for f in rows:
            fp = storage.get_file(f[1])
            sr = do_stat(fp)

            le = lr.entry.add()
            le.filename = f[1]
            le.stat.CopyFrom(sr)

    ser = lr.SerializeToString()
    Header(RESP_LISTDIR, len(ser)).to_stream(wfile)
    wfile.write(ser)
Example #8
    async def search(self, request):

        if request.username:
            database_name = hashlib.sha256(
                request.username.encode("utf-8")).hexdigest()
        else:
            raise exceptions.Unauthorized("A valid token is needed")

        data = await request.post()
        if "spaces" not in data:
            raise exceptions.UserError("you must specify spaces")

        results = storage.atto.Database(database_name).inter(
            data["spaces"].split())

        output = {}
        for result in results:
            with open(storage.get_file("." + result[0])) as json_file:
                dotfile_content = json.load(json_file)[database_name]
                dotfile_content["spaces"] = list(result[1])
                output[result[0]] = dotfile_content

        return web.json_response({"results": output})
Example #9
    async def upload(self, request):
        """ EXAMPLE (curl)
        curl -H "Authorization:auth" -F "name=Awesome Background" -F "type=image" -F "desc=A cool image" -F "hash=617Y7DY73y2" -F "chunk=0" 
        -F "spaces=['A','B']" -F "file=@./background.jpg" -X POST localhost:8080/upload
        """

        if request.username:
            database_name = hashlib.sha256(
                request.username.encode("utf-8")).hexdigest()
        else:
            raise exceptions.Unauthorized("A valid token is needed")

        reader = await request.multipart()

        # infos

        field = await reader.next()
        assert field.name == "name"
        name = (await field.read()).decode("utf-8")

        field = await reader.next()
        assert field.name == "type"
        content_type = (await field.read()).decode("utf-8")

        field = await reader.next()
        assert field.name == "desc"
        description = (await field.read()).decode("utf-8")

        # sha256
        field = await reader.next()
        assert field.name == "hash"
        hash = (await field.read()).decode("utf-8")

        # chunk index
        field = await reader.next()
        assert field.name == "chunk"

        # spaces
        field = await reader.next()
        assert field.name == "spaces"
        spaces = (await field.read()).decode("utf-8").split()

        # file
        field = await reader.next()

        # create files folder if not created
        Path(storage.get_folder()).mkdir(parents=True, exist_ok=True)

        # cannot rely on Content-Length because of chunked transfer
        size = 0
        with open(storage.get_file(hash), "wb") as f:
            while True:
                chunk = await field.read_chunk()  # 8192 bytes by default.
                if not chunk:
                    break
                size += len(chunk)
                f.write(chunk)

        # save file infos
        dotfile_path = storage.get_file("." + hash)
        if os.path.exists(dotfile_path):
            with open(dotfile_path, "r") as dotfile:
                data = json.load(dotfile)
        else:
            data = {}
        with open(dotfile_path, "w") as dotfile:
            data[database_name] = {
                "name": name,
                "type": content_type,
                "desc": description,
            }
            json.dump(data, dotfile)

        storage.atto.Database(database_name).add_data((hash, spaces))
        return web.json_response({"stored": True, "size": size})
Example #10
def align(expnums, ccd, version='s', dry_run=False):
    """Create a 'shifts' file that transforms the space/flux/time scale of all images to the first image.

    This function relies on the .fwhm, .trans.jmp, .phot and .zeropoint.used files for inputs.
    The scaling we are computing here is for use in planting sources into the image at the same sky/flux locations
    while accounting for motions of sources with time.

    :param expnums: list of MegaPrime exposure numbers to add artificial KBOs to;
                    the first frame in the list is the reference.
    :param ccd: which ccd to work on.
    :param version: Add sources to the 'o', 'p' or 's' images
    :param dry_run: don't push results to VOSpace.
    """

    # Get the images and supporting files that we need from the VOSpace area
    # get_image and get_file check if the image/file is already on disk.
    # Re-compute fluxes from the PSF stars and then recompute the x/y/flux scaling.

    # some dictionaries to hold the various scaling values
    pos = {}
    apcor = {}
    mags = {}
    zmag = {}
    mjdates = {}

    for expnum in expnums:
        filename = storage.get_image(expnum, ccd=ccd, version=version)
        zmag[expnum] = storage.get_zeropoint(expnum, ccd, prefix=None, version=version)
        mjdates[expnum] = float(fits.open(filename)[0].header.get('MJD-OBS'))
        apcor[expnum] = [float(x) for x in open(storage.get_file(expnum,
                                                                 ccd=ccd,
                                                                 version=version,
                                                                 ext=storage.APCOR_EXT)).read().split()]
        keys = ['crval1', 'cd1_1', 'cd1_2', 'crval2', 'cd2_1', 'cd2_2']
        # load the .trans.jmp values into a 'wcs' like dictionary.
        # .trans.jmp maps current frame to reference frame in pixel coordinates.
        # the reference frame of all the frames supplied must be the same.
        shifts = dict(zip(keys, [float(x) for x in open(storage.get_file(expnum,
                                                                         ccd=ccd,
                                                                         version=version,
                                                                         ext='trans.jmp')).read().split()]))
        shifts['crpix1'] = 0.0
        shifts['crpix2'] = 0.0
        # now create a wcs object based on those transforms, this wcs links the current frame's
        # pixel coordinates to the reference frame's pixel coordinates.
        w = get_wcs(shifts)

        # get the PHOT file that was produced by the mkpsf routine
        logging.debug("Reading .phot file {}".format(expnum))
        phot = ascii.read(storage.get_file(expnum, ccd=ccd, version=version, ext='phot'), format='daophot')

        # compute the small-aperture magnitudes of the stars used in the PSF
        import daophot
        logging.debug("Running phot on {}".format(filename))
        mags[expnum] = daophot.phot(filename,
                                    phot['XCENTER'],
                                    phot['YCENTER'],
                                    aperture=apcor[expnum][0],
                                    sky=apcor[expnum][1] + 1,
                                    swidth=apcor[expnum][0],
                                    zmag=zmag[expnum])

        # convert the x/y positions to positions in Frame 1 based on the trans.jmp values.
        logging.debug("Doing the XY translation to reference frame: {}".format(w))
        (x, y) = w.wcs_pix2world(mags[expnum]["XCENTER"], mags[expnum]["YCENTER"], 1)
        pos[expnum] = numpy.transpose([x, y])
        # match this exposure's PSF star positions against those in the first image of the set.
        logging.debug("Matching lists")
        idx1, idx2 = util.match_lists(pos[expnums[0]], pos[expnum])

        # compute the magnitude offset between the current frame and the reference.
        dmags = numpy.ma.array(mags[expnums[0]]["MAG"] - apcor[expnums[0]][2] -
                               (mags[expnum]["MAG"][idx1] - apcor[expnum][2]),
                               mask=idx1.mask)
        dmags.sort()
        logging.debug("Computed dmags between input and reference: {}".format(dmags))
        error_count = 0

        error_count += 1
        logging.debug("{}".format(error_count))

        # compute the median and determine if that shift is small compared to the scatter.
        try:
            midx = int(numpy.sum(numpy.any([~dmags.mask], axis=0)) / 2.0)
            dmag = float(dmags[midx])
            logging.debug("Computed a mag delta of: {}".format(dmag))
        except Exception as e:
            logging.error(str(e))
            logging.error("Failed to compute mag offset between plant and found using: {}".format(dmags))
            dmag = 99.99

        error_count += 1
        logging.debug("{}".format(error_count))

        try:
            if math.fabs(dmag) > 3 * (dmags.std() + 0.01):
                logging.warning("Magnitude shift {} between {} and {} is large: {}".format(dmag,
                                                                                           expnums[0],
                                                                                           expnum,
                                                                                           shifts))
        except Exception as e:
            logging.error(str(e))

        error_count += 1
        logging.debug("{}".format(error_count))

        shifts['dmag'] = dmag
        shifts['emag'] = dmags.std()
        shifts['nmag'] = len(dmags.mask) - dmags.mask.sum()
        shifts['dmjd'] = mjdates[expnums[0]] - mjdates[expnum]
        shift_file = os.path.basename(storage.get_uri(expnum, ccd, version, '.shifts'))

        error_count += 1
        logging.debug("{}".format(error_count))

        try:
            with open(shift_file, 'w') as fh:
                fh.write(json.dumps(shifts, sort_keys=True, indent=4, separators=(',', ': ')))
                fh.write('\n')
        except Exception as e:
            logging.error("Creation of SHIFTS file failed while trying to write: {}".format(shifts))
            raise e

        error_count += 1
        logging.debug("{}".format(error_count))

        if not dry_run:
            storage.copy(shift_file, storage.get_uri(expnum, ccd, version, '.shifts'))
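A usage sketch for align, with placeholder exposure numbers (the real values depend on the MegaPrime dataset being processed; as the docstring notes, the first entry is the reference frame):

# Hypothetical invocation; the exposure numbers below are placeholders.
expnums = [1616681, 1616692, 1616703]  # first entry is the reference frame
align(expnums, ccd=22, version='s', dry_run=True)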