Code Example #1
File: remove.py Project: yuben75/yotta
def rmLinkOrDirectory(path, nonexistent_warning):
    if not os.path.exists(path):
        logging.warning(nonexistent_warning)
        return 1
    if fsutils.isLink(path):
        fsutils.rmF(path)
    else:
        fsutils.rmRf(path)
    return 0
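
Both helpers above come from yotta/lib/fsutils.py: rmF removes a single file or symlink (ignoring a missing path, like rm -f), while rmRf removes a whole directory tree (like rm -rf). A stdlib-only sketch of the assumed semantics; rm_f and rm_rf are hypothetical stand-ins, not yotta's actual implementation:

import errno
import os
import shutil

def rm_f(path):
    # remove a file or symlink; a missing path is not an error
    try:
        os.remove(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise

def rm_rf(path):
    # remove a directory tree; a missing path is not an error
    try:
        shutil.rmtree(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise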
Code Example #2
File: access_common.py Project: yuben75/yotta
def removeFromCache(cache_key):
    f = os.path.join(folders.cacheDirectory(), cache_key)
    try:
        fsutils.rmF(f)
        # remove any metadata too, if it exists
        fsutils.rmF(f + '.json')
    except OSError:
        # if we failed to remove either file, then it might be because another
        # instance of yotta is using it, so just skip it this time.
        pass
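
A hedged usage sketch of the cache lifecycle this function participates in; response, expected_sha256, and dest are hypothetical names, and downloadToCache/unpackFrom are shown in the other examples on this page:

key = downloadToCache(response, hashinfo={'sha256': expected_sha256})
try:
    unpackFrom(os.path.join(folders.cacheDirectory(), key), dest)
finally:
    # cleans up both <key> and its <key>.json metadata sidecar
    removeFromCache(key)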
Code Example #3
def execCommand(args, following_args):
    # standard library modules
    import logging

    # validate, , validate things, internal
    from yotta.lib import validate
    # ordered_json, , order-preserving json handling, internal
    from yotta.lib import ordered_json
    # fsutils, , filesystem utils, internal
    from yotta.lib.fsutils import rmF
    # list, , the yotta list subcommand, internal
    from yotta import list as yotta_list

    # first remove any existing shrinkwrap:
    rmF('yotta-shrinkwrap.json')

    c = validate.currentDirectoryModule()
    if not c:
        return 1
    if not args.target:
        logging.error('No target has been set, use "yotta target" to set one.')
        return 1

    target, errors = c.satisfyTarget(args.target)
    if errors:
        for error in errors:
            logging.error(error)
        return 1

    installed_modules = c.getDependenciesRecursive(target=target,
                                                   available_components=[
                                                       (c.getName(), c)
                                                   ],
                                                   test='toplevel')

    dependency_list = yotta_list.resolveDependencyGraph(target,
                                                        c,
                                                        installed_modules,
                                                        test='toplevel')

    errors = checkDependenciesForShrinkwrap(dependency_list)
    if len(errors):
        logging.error("Dependency errors prevent shrinkwrap creation:")
        for error in errors:
            logging.error(error)
        logging.error("Perhaps you need to `yotta install` first?")
        return 1

    with open('yotta-shrinkwrap.json', 'w') as f:
        f.write(
            ordered_json.dumps(
                prepareShrinkwarp(dependency_list, target.hierarchy)))
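
The early returns above follow the yotta subcommand convention: return 1 to signal failure, and fall off the end (returning None) on success. A minimal sketch of a dispatcher consuming that contract; this is illustrative, not yotta's actual CLI wiring:

import sys

status = execCommand(args, [])  # args: a parsed argparse namespace
sys.exit(status or 0)           # None (success) maps to exit code 0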
Code Example #4
File: access_common.py Project: yuben75/yotta
def unpackFrom(tar_file_path, to_directory):
    # first unpack into a sibling directory of the specified directory, and
    # then move it into place.

    # we expect our tarballs to contain a single top-level directory. We strip
    # off this name as we extract to minimise the path length

    into_parent_dir = os.path.dirname(to_directory)
    fsutils.mkDirP(into_parent_dir)
    temp_directory = tempfile.mkdtemp(dir=into_parent_dir)
    try:
        with tarfile.open(tar_file_path) as tf:
            strip_dirname = ''
            # get the extraction directory name from the first part of the
            # extraction paths: it should be the same for all members of
            # the archive
            for m in tf.getmembers():
                split_path = fsutils.fullySplitPath(m.name)
                logger.debug('process member: %s %s', m.name, split_path)
                if os.path.isabs(m.name) or '..' in split_path:
                    raise ValueError('archive uses invalid paths')
                if not strip_dirname:
                    if len(split_path) != 1 or not len(split_path[0]):
                        raise ValueError(
                            'archive does not appear to contain a single module'
                        )
                    strip_dirname = split_path[0]
                    continue
                else:
                    if split_path[0] != strip_dirname:
                        raise ValueError(
                            'archive does not appear to contain a single module'
                        )
                m.name = os.path.join(*split_path[1:])
                tf.extract(m, path=temp_directory)
        # make sure the destination directory doesn't exist:
        fsutils.rmRf(to_directory)
        shutil.move(temp_directory, to_directory)
        temp_directory = None
        logger.debug('extraction complete %s', to_directory)
    except IOError as e:
        if e.errno != errno.ENOENT:
            logger.error('failed to extract tarfile %s', e)
            fsutils.rmF(tar_file_path)
        raise
    finally:
        if temp_directory is not None:
            # if anything has failed, cleanup
            fsutils.rmRf(temp_directory)
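
Two details in unpackFrom are worth isolating: tar member names are validated against absolute paths and '..' components before extraction (a path-traversal guard), and the single top-level directory is stripped by rewriting m.name. A plain-Python stand-in for the fullySplitPath helper it assumes (fully_split is hypothetical; the real helper lives in yotta/lib/fsutils.py):

import os

def fully_split(path):
    # split 'a/b/c' into ['a', 'b', 'c']
    parts = []
    while True:
        head, tail = os.path.split(path)
        if tail:
            parts.insert(0, tail)
        if not head or head == path:
            if head:
                parts.insert(0, head)
            break
        path = head
    return parts

assert fully_split('pkg/module.json') == ['pkg', 'module.json']
# members like '/etc/passwd' or '../evil' fail the os.path.isabs and
# '..' checks above and abort extraction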
Code Example #5
File: shrinkwrap.py Project: ARMmbed/yotta
def execCommand(args, following_args):
    # standard library modules
    import logging

    # validate, , validate things, internal
    from yotta.lib import validate
    # ordered_json, , order-preserving json handling, internal
    from yotta.lib import ordered_json
    # fsutils, , filesystem utils, internal
    from yotta.lib.fsutils import rmF
    # list, , the yotta list subcommand, internal
    from yotta import list as yotta_list

    # first remove any existing shrinkwrap:
    rmF('yotta-shrinkwrap.json')

    c = validate.currentDirectoryModule()
    if not c:
        return 1
    if not args.target:
        logging.error('No target has been set, use "yotta target" to set one.')
        return 1

    target, errors = c.satisfyTarget(args.target)
    if errors:
        for error in errors:
            logging.error(error)
        return 1

    installed_modules = c.getDependenciesRecursive(
        target=target,
        available_components=[(c.getName(), c)],
        test='toplevel'
    )

    dependency_list = yotta_list.resolveDependencyGraph(target, c, installed_modules, test='toplevel')

    errors = checkDependenciesForShrinkwrap(dependency_list)
    if len(errors):
        logging.error("Dependency errors prevent shrinkwrap creation:")
        for error in errors:
            logging.error(error)
        logging.error("Perhaps you need to `yotta install` first?")
        return 1

    with open('yotta-shrinkwrap.json', 'w') as f:
        f.write(ordered_json.dumps(prepareShrinkwarp(dependency_list, target.hierarchy)))
Code Example #6
File: access_common.py Project: kushaldas/yotta
def unpackFrom(tar_file_path, to_directory):
    # first unpack into a sibling directory of the specified directory, and
    # then move it into place.

    # we expect our tarballs to contain a single top-level directory. We strip
    # off this name as we extract to minimise the path length

    into_parent_dir = os.path.dirname(to_directory)
    fsutils.mkDirP(into_parent_dir)
    temp_directory = tempfile.mkdtemp(dir=into_parent_dir)
    try:
        with tarfile.open(tar_file_path) as tf:
            strip_dirname = ''
            # get the extraction directory name from the first part of the
            # extraction paths: it should be the same for all members of
            # the archive
            for m in tf.getmembers():
                split_path = fsutils.fullySplitPath(m.name)
                logger.debug('process member: %s %s', m.name, split_path)
                if os.path.isabs(m.name) or '..' in split_path:
                    raise ValueError('archive uses invalid paths')
                if not strip_dirname:
                    if len(split_path) != 1 or not len(split_path[0]):
                        raise ValueError('archive does not appear to contain a single module')
                    strip_dirname = split_path[0]
                    continue
                else:
                    if split_path[0] != strip_dirname:
                        raise ValueError('archive does not appear to contain a single module')
                m.name = os.path.join(*split_path[1:])
                tf.extract(m, path=temp_directory)
        # make sure the destination directory doesn't exist:
        fsutils.rmRf(to_directory)
        shutil.move(temp_directory, to_directory)
        temp_directory = None
        logger.debug('extraction complete %s', to_directory)
    except IOError as e:
        if e.errno != errno.ENOENT:
            logger.error('failed to extract tarfile %s', e)
            fsutils.rmF(tar_file_path)
        raise
    finally:
        if temp_directory is not None:
            # if anything has failed, cleanup
            fsutils.rmRf(temp_directory)
Code Example #7
File: uninstall.py Project: ARMmbed/yotta
def execCommand(args, following_args):
    err = validate.componentNameValidationError(args.component)
    if err:
        logging.error(err)
        return 1
    c = validate.currentDirectoryModule()
    if not c:
        return 1
    status = 0
    if not c.removeDependency(args.component):
        status = 1
    else:
        c.writeDescription()
    path = os.path.join(c.modulesPath(), args.component)
    if fsutils.isLink(path):
        fsutils.rmF(path)
    else:
        fsutils.rmRf(path)
    return status
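
A hedged sketch of how this subcommand might be driven; the parser layout and the module name 'simplelog' are hypothetical, not yotta's actual CLI setup:

import argparse

parser = argparse.ArgumentParser(prog='yotta')
subparsers = parser.add_subparsers()
uninstall = subparsers.add_parser('uninstall')
uninstall.add_argument('component')
uninstall.set_defaults(command=execCommand)

args = parser.parse_args(['uninstall', 'simplelog'])
status = args.command(args, [])  # 0 on success, 1 on failure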
Code Example #8
File: uninstall.py Project: yuben75/yotta
def execCommand(args, following_args):
    err = validate.componentNameValidationError(args.component)
    if err:
        logging.error(err)
        return 1
    c = validate.currentDirectoryModule()
    if not c:
        return 1
    status = 0
    if not c.removeDependency(args.component):
        status = 1
    else:
        c.writeDescription()
    path = os.path.join(c.modulesPath(), args.component)
    if fsutils.isLink(path):
        fsutils.rmF(path)
    else:
        fsutils.rmRf(path)
    return status
Code Example #9
def publish(self, registry=None):
    ''' Publish to the appropriate registry, return a description of any
        errors that occurred, or None if successful.
        No VCS tagging is performed.
    '''
    if (registry is None) or (registry == registry_access.Registry_Base_URL):
        if 'private' in self.description and self.description['private']:
            return "this %s is private and cannot be published" % (
                self.description_filename.split('.')[0])
    upload_archive = os.path.join(self.path, 'upload.tar.gz')
    fsutils.rmF(upload_archive)
    fd = os.open(
        upload_archive,
        os.O_CREAT | os.O_EXCL | os.O_RDWR | getattr(os, "O_BINARY", 0))
    with os.fdopen(fd, 'rb+') as tar_file:
        tar_file.truncate()
        self.generateTarball(tar_file)
        logger.debug('generated tar file of length %s', tar_file.tell())
        tar_file.seek(0)
        # calculate the hash of the file before we upload it:
        shasum = hashlib.sha256()
        while True:
            chunk = tar_file.read(1000)
            if not chunk:
                break
            shasum.update(chunk)
        logger.debug('generated tar file has hash %s', shasum.hexdigest())
        tar_file.seek(0)
        with self.findAndOpenReadme() as readme_file_wrapper:
            if not readme_file_wrapper:
                logger.warning("no readme.md file detected")
            with open(self.getDescriptionFile(), 'r') as description_file:
                return registry_access.publish(
                    self.getRegistryNamespace(),
                    self.getName(),
                    self.getVersion(),
                    description_file,
                    tar_file,
                    readme_file_wrapper.file,
                    readme_file_wrapper.extension().lower(),
                    registry=registry)
Code Example #10
File: pack.py Project: Timmmm/yotta
def publish(self, registry=None):
    ''' Publish to the appropriate registry, return a description of any
        errors that occurred, or None if successful.
        No VCS tagging is performed.
    '''
    if (registry is None) or (registry == registry_access.Registry_Base_URL):
        if 'private' in self.description and self.description['private']:
            return "this %s is private and cannot be published" % (self.description_filename.split('.')[0])
    upload_archive = os.path.join(self.path, 'upload.tar.gz')
    fsutils.rmF(upload_archive)
    fd = os.open(upload_archive, os.O_CREAT | os.O_EXCL | os.O_RDWR | getattr(os, "O_BINARY", 0))
    with os.fdopen(fd, 'rb+') as tar_file:
        tar_file.truncate()
        self.generateTarball(tar_file)
        logger.debug('generated tar file of length %s', tar_file.tell())
        tar_file.seek(0)
        # calculate the hash of the file before we upload it:
        shasum = hashlib.sha256()
        while True:
            chunk = tar_file.read(1000)
            if not chunk:
                break
            shasum.update(chunk)
        logger.debug('generated tar file has hash %s', shasum.hexdigest())
        tar_file.seek(0)
        with self.findAndOpenReadme() as readme_file_wrapper:
            if not readme_file_wrapper:
                logger.warning("no readme.md file detected")
            with open(self.getDescriptionFile(), 'r') as description_file:
                return registry_access.publish(
                    self.getRegistryNamespace(),
                    self.getName(),
                    self.getVersion(),
                    description_file,
                    tar_file,
                    readme_file_wrapper.file,
                    readme_file_wrapper.extension().lower(),
                    registry=registry
                )
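
The hash loop in publish() reads the tarball in small chunks (1000 bytes here) so memory stays flat regardless of archive size. The same idiom isolated as a helper; sha256_of is a hypothetical name:

import hashlib

def sha256_of(fileobj, chunk_size=4096):
    digest = hashlib.sha256()
    while True:
        chunk = fileobj.read(chunk_size)
        if not chunk:
            break
        digest.update(chunk)
    return digest.hexdigest()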
Code Example #11
File: access_common.py Project: yuben75/yotta
def _moveCachedFile(from_key, to_key):
    ''' Move a file atomically within the cache: used to make cached files
        available at known keys, so they can be used by other processes.
    '''
    cache_dir = folders.cacheDirectory()
    from_path = os.path.join(cache_dir, from_key)
    to_path = os.path.join(cache_dir, to_key)
    try:
        os.rename(from_path, to_path)
        # if moving the actual file was successful, then try to move the
        # metadata:
        os.rename(from_path + '.json', to_path + '.json')
    except Exception as e:
        # if the source doesn't exist, or the destination doesn't exist, remove
        # the file instead.
        # windows error 183 == file already exists
        # (be careful not to use WindowsError on non-windows platforms as it
        # isn't defined)
        if (isinstance(e, OSError) and e.errno == errno.ENOENT) or \
           (isinstance(e, getattr(__builtins__, "WindowsError", type(None))) and e.errno == 183):
            fsutils.rmF(from_path)
        else:
            raise
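
os.rename is atomic when source and destination are on the same filesystem, which is what makes this safe across concurrent yotta processes. A minimal sketch of the write-then-rename idiom the cache relies on; cache_dir, payload, and final_key are hypothetical names:

import os
import tempfile

fd, tmp_name = tempfile.mkstemp(dir=cache_dir)
with os.fdopen(fd, 'wb') as f:
    f.write(payload)
# readers now see either the old file or the complete new one, never a
# partial write
os.rename(tmp_name, os.path.join(cache_dir, final_key))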
Code Example #12
File: access_common.py Project: kushaldas/yotta
def downloadToCache(stream, hashinfo={}, cache_key=None, origin_info=dict()):
    ''' Download the specified stream to a temporary cache directory, and
        return a cache key that can be used to access/remove the file.
        If cache_key is None, then a cache key will be generated and returned.
        You will probably want to use removeFromCache(cache_key) to remove it.
    '''
    hash_name  = None
    hash_value = None
    m = None

    if len(hashinfo):
        # check for hashes in preferred order. Currently this is just sha256
        # (which the registry uses). Initial investigations suggest that github
        # doesn't return a header with the hash of the file being downloaded.
        for h in ('sha256',):
            if h in hashinfo:
                hash_name  = h
                hash_value = hashinfo[h]
                m = getattr(hashlib, h)()
                break
        if not hash_name:
            logger.warning('could not find supported hash type in %s', hashinfo)

    if cache_key is None:
        cache_key = '%032x' % random.getrandbits(256)

    cache_dir = folders.cacheDirectory()
    fsutils.mkDirP(cache_dir)
    cache_as = os.path.join(cache_dir, cache_key)
    file_size = 0

    (download_file, download_fname) = tempfile.mkstemp(dir=cache_dir)
    with os.fdopen(download_file, 'wb') as f:
        f.seek(0)
        for chunk in stream.iter_content(4096):
            f.write(chunk)
            if hash_name:
                m.update(chunk)

        if hash_name:
            calculated_hash = m.hexdigest()
            logger.debug(
                'calculated %s hash: %s check against: %s' % (
                    hash_name, calculated_hash, hash_value
                )
            )
            if hash_value and (hash_value != calculated_hash):
                raise Exception('Hash verification failed.')
        file_size = f.tell()
        logger.debug('wrote tarfile of size: %s to %s', file_size, download_fname)
        f.truncate()
    try:
        os.rename(download_fname, cache_as)
        extended_origin_info = {
            'hash': hashinfo,
            'size': file_size
        }
        extended_origin_info.update(origin_info)
        ordered_json.dump(cache_as + '.json', extended_origin_info)
    except OSError as e:
        if e.errno == errno.ENOENT:
            # if we failed, it's because the file already exists (probably
            # because another process got there first), so just rm our
            # temporary file and continue
            cache_logger.debug('another process downloaded %s first', cache_key)
            fsutils.rmF(download_fname)
        else:
            raise

    return cache_key
Code Example #13
File: access_common.py Project: bearsh/yotta
def downloadToCache(stream, hashinfo={}, cache_key=None, origin_info=dict()):
    ''' Download the specified stream to a temporary cache directory, and
        return a cache key that can be used to access/remove the file.
        If cache_key is None, then a cache key will be generated and returned.
        You will probably want to use removeFromCache(cache_key) to remove it.
    '''
    hash_name = None
    hash_value = None
    m = None

    if len(hashinfo):
        # check for hashes in preferred order. Currently this is just sha256
        # (which the registry uses). Initial investigations suggest that github
        # doesn't return a header with the hash of the file being downloaded.
        for h in ('sha256', ):
            if h in hashinfo:
                hash_name = h
                hash_value = hashinfo[h]
                m = getattr(hashlib, h)()
                break
        if not hash_name:
            logger.warning('could not find supported hash type in %s',
                           hashinfo)

    if cache_key is None:
        cache_key = '%032x' % random.getrandbits(256)

    cache_dir = folders.cacheDirectory()
    fsutils.mkDirP(cache_dir)
    cache_as = os.path.join(cache_dir, cache_key)
    file_size = 0

    (download_file, download_fname) = tempfile.mkstemp(dir=cache_dir)
    with os.fdopen(download_file, 'wb') as f:
        f.seek(0)
        for chunk in stream.iter_content(4096):
            f.write(chunk)
            if hash_name:
                m.update(chunk)

        if hash_name:
            calculated_hash = m.hexdigest()
            logger.debug('calculated %s hash: %s check against: %s' %
                         (hash_name, calculated_hash, hash_value))
            if hash_value and (hash_value != calculated_hash):
                raise Exception('Hash verification failed.')
        file_size = f.tell()
        logger.debug('wrote tarfile of size: %s to %s', file_size,
                     download_fname)
        f.truncate()
    try:
        os.rename(download_fname, cache_as)
        extended_origin_info = {'hash': hashinfo, 'size': file_size}
        extended_origin_info.update(origin_info)
        ordered_json.dump(cache_as + '.json', extended_origin_info)
    except Exception as e:
        # windows error 183 == file already exists
        # (be careful not to use WindowsError on non-windows platforms as it
        # isn't defined)
        if (isinstance(e, OSError) and e.errno == errno.ENOENT) or \
           (isinstance(e, getattr(__builtins__, "WindowsError", type(None))) and e.errno == 183):
            # if we failed, it's because the file already exists (probably
            # because another process got there first), so just rm our
            # temporary file and continue
            cache_logger.debug('another process downloaded %s first',
                               cache_key)
            fsutils.rmF(download_fname)
        else:
            raise

    return cache_key
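
A hedged usage sketch: the stream argument only needs to expose iter_content(), as a requests response opened with stream=True does; url and expected_sha256 are hypothetical names:

import requests

response = requests.get(url, stream=True)
response.raise_for_status()
cache_key = downloadToCache(response,
                            hashinfo={'sha256': expected_sha256},
                            origin_info={'url': url})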
Code Example #14
File: access_common.py Project: bearsh/yotta
def removeFromCache(cache_key):
    f = os.path.join(folders.cacheDirectory(), cache_key)
    fsutils.rmF(f)
    # remove any metadata too, if it exists
    fsutils.rmF(f + '.json')
Code Example #15
File: vcs.py Project: parisk/yotta
def test_isClean(self):
    self.assertTrue(self.working_copy.isClean())
    fsutils.rmF(os.path.join(self.working_copy.workingDirectory(), 'package.json'))
    self.assertFalse(self.working_copy.isClean())
Code Example #16
def test_isClean(self):
    self.assertTrue(self.working_copy.isClean())
    fsutils.rmF(
        os.path.join(self.working_copy.workingDirectory(), 'module.json'))
    self.assertFalse(self.working_copy.isClean())
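
Both tests dirty the working copy by deleting a tracked file and expect isClean() to notice. An illustrative stand-in for such a check, assuming a git working copy; is_clean is hypothetical, and yotta's real implementation lives in yotta/lib/vcs.py:

import subprocess

def is_clean(workdir):
    # any output from --porcelain means uncommitted changes
    out = subprocess.check_output(['git', 'status', '--porcelain'],
                                  cwd=workdir)
    return not out.strip()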
Code Example #17
File: access_common.py Project: kushaldas/yotta
def removeFromCache(cache_key):
    f = os.path.join(folders.cacheDirectory(), cache_key)
    fsutils.rmF(f)
    # remove any metadata too, if it exists
    fsutils.rmF(f + '.json')