Example #1
def create(self, obj, **kwargs):
    if not self.exists(obj, **kwargs):
        #print "S3 OS creating a dataset with ID %s" % dataset_id
        # Pull out locally used fields
        extra_dir = kwargs.get('extra_dir', None)
        extra_dir_at_root = kwargs.get('extra_dir_at_root', False)
        dir_only = kwargs.get('dir_only', False)
        alt_name = kwargs.get('alt_name', None)
        # print "---- Processing: %s; %s" % (alt_name, locals())
        # Construct hashed path
        rel_path = os.path.join(*directory_hash_id(obj.id))
        # Optionally append extra_dir
        if extra_dir is not None:
            if extra_dir_at_root:
                rel_path = os.path.join(extra_dir, rel_path)
            else:
                rel_path = os.path.join(rel_path, extra_dir)
        # Create given directory in cache
        cache_dir = os.path.join(self.staging_path, rel_path)
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        # Although not really necessary to create S3 folders (because S3 has a
        # flat namespace), do so for consistency with the regular file system.
        # S3 folders are marked by a trailing '/', so add it now:
        # s3_dir = '%s/' % rel_path
        # self._push_to_s3(s3_dir, from_string='')
        # If instructed, create the dataset in cache & in S3
        if not dir_only:
            rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
            open(os.path.join(self.staging_path, rel_path), 'w').close()
            self._push_to_s3(rel_path, from_string='')
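
These snippets all lean on a `directory_hash_id` helper that is not shown here. A minimal sketch of what it could look like, assuming Galaxy-style three-digit directory levels; this is an illustration consistent with the paths asserted in the test examples below, not the project's actual implementation:

def directory_hash_id(dataset_id):
    # Sketch (assumption): spread datasets across nested three-digit
    # directories so each leaf directory holds at most ~1000 files.
    s = str(dataset_id)
    if len(s) < 4:
        # ids 0-999 all land under .../000/
        return ["000"]
    # Left-pad to a multiple of three digits, then drop the last three
    # (those only vary within a single leaf directory).
    padded = "0" * ((3 - len(s) % 3) % 3) + s
    return [padded[i:i + 3] for i in range(0, len(padded) - 3, 3)]

Under this sketch, id 10 maps to 000/ and id 1234567 maps to 001/234/, which matches the file_name assertion in the test examples further down.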
Example #2
def create(self, obj, **kwargs):
    if not self.exists(obj, **kwargs):
        #print "S3 OS creating a dataset with ID %s" % kwargs
        # Pull out locally used fields
        extra_dir = kwargs.get('extra_dir', None)
        extra_dir_at_root = kwargs.get('extra_dir_at_root', False)
        dir_only = kwargs.get('dir_only', False)
        alt_name = kwargs.get('alt_name', None)
        # print "---- Processing: %s; %s" % (alt_name, locals())
        # Construct hashed path
        rel_path = os.path.join(*directory_hash_id(obj.id))

        # Optionally append extra_dir
        if extra_dir is not None:
            if extra_dir_at_root:
                rel_path = os.path.join(extra_dir, rel_path)
            else:
                rel_path = os.path.join(rel_path, extra_dir)
        # Create given directory in cache
        cache_dir = os.path.join(self.staging_path, rel_path)
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        # Although not really necessary to create S3 folders (because S3 has a
        # flat namespace), do so for consistency with the regular file system.
        # S3 folders are marked by a trailing '/', so add it now:
        # s3_dir = '%s/' % rel_path
        # self._push_to_os(s3_dir, from_string='')
        # If instructed, create the dataset in cache & in S3
        if not dir_only:
            rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
            open(os.path.join(self.staging_path, rel_path), 'w').close()
            self._push_to_os(rel_path, from_string='')
Example #3
def _construct_path(self, obj, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
    rel_path = os.path.join(*directory_hash_id(obj.id))
    if extra_dir is not None:
        if extra_dir_at_root:
            rel_path = os.path.join(extra_dir, rel_path)
        else:
            rel_path = os.path.join(rel_path, extra_dir)
    # S3 folders are marked by a trailing '/', so add it now
    rel_path = '%s/' % rel_path
    if not dir_only:
        rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
    return rel_path
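
A side note on the trailing '/' appended above: os.path.join tolerates an existing separator, so the final join still yields a clean path. A quick check:

import os

print(os.path.join('000/', 'dataset_10.dat'))  # -> 000/dataset_10.dat (POSIX)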
Example #4
def _construct_path(self, obj, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, **kwargs):
    rel_path = os.path.join(*directory_hash_id(obj.id))
    if extra_dir is not None:
        if extra_dir_at_root:
            rel_path = os.path.join(extra_dir, rel_path)
        else:
            rel_path = os.path.join(rel_path, extra_dir)
    # S3 folders are marked by a trailing '/', so add it now
    rel_path = '%s/' % rel_path
    if not dir_only:
        rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
    return rel_path
Example #5
def _construct_path(self, obj, old_style=False, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, **kwargs):
    """Construct the expected absolute path for accessing the object
    identified by `obj.id`.

    :type base_dir: string
    :param base_dir: A key in self.extra_dirs corresponding to the base
                     directory in which this object should be created, or
                     None to specify the default directory.

    :type dir_only: bool
    :param dir_only: If True, check only the path where the file
                     identified by `obj` should be located, not the
                     dataset itself. This option applies to the `extra_dir`
                     argument as well.

    :type extra_dir: string
    :param extra_dir: Append the value of this parameter to the expected path
                      used to access the object identified by `obj`
                      (e.g., /files/000/<extra_dir>/dataset_10.dat).

    :type alt_name: string
    :param alt_name: Use this name as the alternative name for the returned
                     dataset rather than the default.

    :type old_style: bool
    :param old_style: This option is used for backward compatibility. If True,
                      the composed directory structure does not include a hash id
                      (e.g., /files/dataset_10.dat (old) vs. /files/000/dataset_10.dat (new)).
    """
    base = self.extra_dirs.get(base_dir, self.file_path)
    if old_style:
        if extra_dir is not None:
            path = os.path.join(base, extra_dir)
        else:
            path = base
    else:
        # Construct hashed path
        rel_path = os.path.join(*directory_hash_id(obj.id))
        # Optionally append extra_dir
        if extra_dir is not None:
            if extra_dir_at_root:
                rel_path = os.path.join(extra_dir, rel_path)
            else:
                rel_path = os.path.join(rel_path, extra_dir)
        path = os.path.join(base, rel_path)
    if not dir_only:
        path = os.path.join(path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
    return os.path.abspath(path)
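
For orientation, a small sketch (not from the source) of the on-disk layout this method produces; directory_hash_id is the illustrative version sketched after Example #1:

import os

for dataset_id in (10, 1000, 1234567):
    print(os.path.join("/files", *directory_hash_id(dataset_id),
                       "dataset_%s.dat" % dataset_id))
# /files/000/dataset_10.dat
# /files/001/dataset_1000.dat
# /files/001/234/dataset_1234567.dat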
Example #6
def test_basic(self):
    # Start the database and connect the mapping
    model = mapping.init("/tmp", "sqlite:///:memory:", create_tables=True)
    assert model.engine is not None
    # Make some changes and commit them
    u = model.User(email="*****@*****.**", password="password")
    # gs = model.GalaxySession()
    h1 = model.History(name="History 1", user=u)
    #h1.queries.append( model.Query( "h1->q1" ) )
    #h1.queries.append( model.Query( "h1->q2" ) )
    # Name exceeds the 255-character column, so the mapping truncates it
    # (checked in the assert below)
    h2 = model.History(name=("H" * 1024))
    model.session.add_all((u, h1, h2))
    #q1 = model.Query( "h2->q1" )
    d1 = model.HistoryDatasetAssociation(extension="interval",
                                         metadata=dict(chromCol=1,
                                                       startCol=2,
                                                       endCol=3),
                                         history=h2,
                                         create_dataset=True,
                                         sa_session=model.session)
    #h2.queries.append( q1 )
    #h2.queries.append( model.Query( "h2->q2" ) )
    model.session.add(d1)
    model.session.flush()
    model.session.expunge_all()
    # Check
    users = model.session.query(model.User).all()
    assert len(users) == 1
    assert users[0].email == "*****@*****.**"
    assert users[0].password == "password"
    assert len(users[0].histories) == 1
    assert users[0].histories[0].name == "History 1"
    hists = model.session.query(model.History).all()
    assert hists[0].name == "History 1"
    assert hists[1].name == ("H" * 255)
    assert hists[0].user == users[0]
    assert hists[1].user is None
    assert hists[1].datasets[0].metadata.chromCol == 1
    dataset_id = hists[1].datasets[0].id
    assert hists[1].datasets[0].file_name == os.path.join(
        "/tmp", *directory_hash_id(dataset_id)) + ("/dataset_%d.dat" % dataset_id)
    # Do an update and check
    hists[1].name = "History 2b"
    model.session.flush()
    model.session.expunge_all()
    hists = model.session.query(model.History).all()
    assert hists[0].name == "History 1"
    assert hists[1].name == "History 2b"
Example #7
def test_basic( self ):
    # Start the database and connect the mapping
    model = mapping.init( "/tmp", "sqlite:///:memory:", create_tables=True )
    assert model.engine is not None
    # Make some changes and commit them
    u = model.User( email="*****@*****.**", password="password" )
    # gs = model.GalaxySession()
    h1 = model.History( name="History 1", user=u )
    #h1.queries.append( model.Query( "h1->q1" ) )
    #h1.queries.append( model.Query( "h1->q2" ) )
    # Name exceeds the 255-character column and is truncated (see below)
    h2 = model.History( name=( "H" * 1024 ) )
    model.session.add_all( ( u, h1, h2 ) )
    #q1 = model.Query( "h2->q1" )
    d1 = model.HistoryDatasetAssociation( extension="interval", metadata=dict( chromCol=1, startCol=2, endCol=3 ), history=h2, create_dataset=True, sa_session=model.session )
    #h2.queries.append( q1 )
    #h2.queries.append( model.Query( "h2->q2" ) )
    model.session.add( d1 )
    model.session.flush()
    model.session.expunge_all()
    # Check
    users = model.session.query( model.User ).all()
    assert len( users ) == 1
    assert users[0].email == "*****@*****.**"
    assert users[0].password == "password"
    assert len( users[0].histories ) == 1
    assert users[0].histories[0].name == "History 1"
    hists = model.session.query( model.History ).all()
    assert hists[0].name == "History 1"
    assert hists[1].name == ( "H" * 255 )
    assert hists[0].user == users[0]
    assert hists[1].user is None
    assert hists[1].datasets[0].metadata.chromCol == 1
    dataset_id = hists[1].datasets[0].id
    assert hists[1].datasets[0].file_name == os.path.join( "/tmp", *directory_hash_id( dataset_id ) ) + ( "/dataset_%d.dat" % dataset_id )
    # Do an update and check
    hists[1].name = "History 2b"
    model.session.flush()
    model.session.expunge_all()
    hists = model.session.query( model.History ).all()
    assert hists[0].name == "History 1"
    assert hists[1].name == "History 2b"