def find_requirement(self, req, upgrade):
    """Find a link for ``req``, preferring fast local results for pinned reqs.

    For a ``==``-pinned requirement: an already-installed distribution wins
    outright, and a locally found wheel is returned without hitting PyPI.
    In every other case we fall through to the parent finder's full network
    search, with one last best-effort on-disk wheel lookup if that fails.
    """
    if not is_req_pinned(req.req):
        # unpinned requirements aren't very notable. only show with -v
        logger.info('slow: full search for unpinned requirement %s', req)
    else:
        # the version is pinned-down by a ==
        # first try to use any installed package that satisfies the req
        if req.satisfied_by:
            logger.info('Faster! pinned requirement already installed.')
            raise BestVersionAlreadyInstalled

        # then try an optimistic search for a .whl file:
        link = optimistic_wheel_search(req.req, self.index_urls)
        if link is not None:
            logger.info('Faster! Pinned wheel found, without hitting PyPI.')
            return link
        # The wheel will be built during prepare_files
        logger.debug('No wheel found locally for pinned requirement %s', req)

    # otherwise, do the full network search, per usual
    try:
        return super(FasterPackageFinder, self).find_requirement(req, upgrade)
    except DistributionNotFound:
        exc_info = sys.exc_info()
        # Best effort: try to satisfy the requirement from a wheel on disk
        fallback_link = optimistic_wheel_search(req.req, self.index_urls)
        if fallback_link:
            return fallback_link
        reraise(*exc_info)
def find_requirement(self, req, upgrade):
    """Resolve ``req`` to a link, short-circuiting network work when pinned.

    A ``==``-pinned requirement that is already installed raises
    BestVersionAlreadyInstalled; a locally available wheel is returned
    directly. Anything else goes through the normal (slow) parent search,
    retrying an on-disk wheel lookup before re-raising DistributionNotFound.
    """
    if is_req_pinned(req.req):
        # the version is pinned-down by a ==
        # first try to use any installed package that satisfies the req
        if req.satisfied_by:
            logger.info('Faster! pinned requirement already installed.')
            raise BestVersionAlreadyInstalled

        # then try an optimistic search for a .whl file:
        link = optimistic_wheel_search(req.req, self.index_urls)
        if link is None:
            # The wheel will be built during prepare_files
            logger.debug('No wheel found locally for pinned requirement %s', req)
        else:
            logger.info('Faster! Pinned wheel found, without hitting PyPI.')
            return link
    else:
        # unpinned requirements aren't very notable. only show with -v
        logger.info('slow: full search for unpinned requirement %s', req)

    # otherwise, do the full network search, per usual
    try:
        return super(FasterPackageFinder, self).find_requirement(req, upgrade)
    except DistributionNotFound:
        saved_exc = sys.exc_info()
        # Best effort: try and install from suitable version on-disk
        local_link = optimistic_wheel_search(req.req, self.index_urls)
        if local_link:
            return local_link
        else:
            reraise(*saved_exc)
def process_array_with_master_node_fields(node_properties_as_array):
    """Build a MasterNode from a whitespace-split node-description row.

    Field layout (from the visible indexing): ``[0]`` node id, ``[1]``
    ``host:port``; when the row has exactly 9 fields, ``[8]`` carries the
    slot range — presumably a redis ``CLUSTER NODES`` line (TODO confirm).

    Returns the MasterNode, or None when the host part is not an IP address.
    """
    host, port = util.split_address(node_properties_as_array[1])
    if is_ip(host):
        start_slot = 0
        end_slot = 0
        # bug fix: the original used `len(...) is 9` — an identity check on
        # an int that only happens to work via CPython's small-int cache
        # (and is a SyntaxWarning on 3.8+).  Use equality instead.
        if len(node_properties_as_array) == 9:
            start_slot, end_slot = process_start_end_slots(
                node_properties_as_array[8])
        master_node = MasterNode(start_slot, end_slot, host, int(port),
                                 node_properties_as_array[0])
        logger.debug(master_node)
        return master_node
    return None
def perform_resharding(masters_with_slots, masters_without_slots, source):
    """Shard slots from every slot-owning master to every empty master.

    For each master that owns slots, computes how many shards it should give
    per split (based on the total master count) and, for each slot-less
    master, logs the planned reshard, waits, then runs ``--cluster fix`` on
    the target as a sanity check.

    Fixes vs. the original: removed the unused ``i`` counter, and switched
    log calls to lazy ``%s`` arguments instead of eager ``%`` formatting.
    """
    amount_of_masters = len(masters_with_slots) + len(masters_without_slots)
    for master_with_slots in masters_with_slots:
        shards_amount_master_will_give = \
            master_with_slots.calculate_amount_of_shards(amount_of_masters)
        logger.debug('%s will give %s shards per split',
                     master_with_slots, shards_amount_master_will_give)
        shards_amount_per_one_master = int(shards_amount_master_will_give)
        for master_without_slots in masters_without_slots:
            cmd_args = [
                '--cluster', 'reshard', source,
                '--cluster-from', master_with_slots.node_id,
                '--cluster-to', master_without_slots.node_id,
                '--cluster-slots', str(shards_amount_per_one_master),
                '--cluster-yes',
            ]
            logger.debug('Sharding %s to %s %s slots',
                         master_with_slots.node_id,
                         master_without_slots.node_id,
                         shards_amount_per_one_master)
            # NOTE(review): the actual reshard invocation is commented out in
            # the original — confirm whether this is an intentional dry run.
            # util.run_redis_cli_cmd(cmd_args, False)
            logger.debug('Soon will run sanity check')
            time.sleep(5)
            cmd_args = [
                '--cluster', 'fix',
                master_without_slots.ip + ":" + str(master_without_slots.port),
            ]
            result = util.run_redis_cli_cmd(cmd_args, True)
            logger.debug('Sanity check returned code %s',
                         str(result.returncode))
def add_entry(self, entry):
    """Same as the original .add_entry, but sets only=False, so that
    egg-links are honored."""
    logger.debug('working-set entry: %r', entry)
    self.entry_keys.setdefault(entry, [])
    self.entries.append(entry)
    for distribution in pkg_resources.find_distributions(entry, False):
        # eggs override anything that's installed normally
        # fun fact: pkg_resources.working_set's results depend on the
        # ordering of os.listdir since the order of os.listdir is
        # entirely arbitrary (an implemenation detail of file system),
        # without calling site.main(), an .egg-link file may or may not
        # be honored, depending on the filesystem
        is_egg = distribution.precedence == pkg_resources.EGG_DIST
        normalized = normalize_name(distribution.key)
        self._normalized_name_mapping[normalized] = distribution.key
        self.add(distribution, entry, False, replace=is_egg)
def extract_masters_without_slots(all_nodes):
    """Collect master nodes from node-description lines that own no slots.

    Skips lines marked ``slave`` or ``noaddr``; a line split on single
    spaces into 2..8 fields whose second field is ``ip:port`` yields a
    MasterNode with an empty (0, 0) slot range.

    Fixes vs. the original: a plain ``for`` loop instead of a hand-rolled
    ``while``/index loop, and ``str.split(' ')`` instead of compiling a
    regex for a single-space split (equivalent output, including empty
    fields for consecutive spaces).
    """
    master_nodes_to_return = []
    for node in all_nodes:
        if 'slave' in node or 'noaddr' in node:
            continue
        node_as_array = node.split(' ')
        if 8 >= len(node_as_array) > 1:
            host, port = util.split_address(node_as_array[1])
            if is_ip(host):
                master_node_to_add = MasterNode(0, 0, host, int(port),
                                                node_as_array[0])
                logger.debug(master_node_to_add)
                master_nodes_to_return.append(master_node_to_add)
    return master_nodes_to_return
def add_entry(self, entry):
    """Same as the original .add_entry, but sets only=False, so that
    egg-links are honored."""
    logger.debug('working-set entry: %r', entry)
    self.entry_keys.setdefault(entry, [])
    self.entries.append(entry)
    # only=False below makes find_distributions also yield egg-links.
    # eggs override anything that's installed normally
    # fun fact: pkg_resources.working_set's results depend on the
    # ordering of os.listdir since the order of os.listdir is
    # entirely arbitrary (an implemenation detail of file system),
    # without calling site.main(), an .egg-link file may or may not
    # be honored, depending on the filesystem
    for dist in pkg_resources.find_distributions(entry, False):
        self._normalized_name_mapping[normalize_name(dist.key)] = dist.key
        self.add(
            dist,
            entry,
            False,
            replace=(dist.precedence == pkg_resources.EGG_DIST),
        )
def reshard(source):
    """Drive a full reshard of the cluster reachable at ``source``.

    Looks up masters with and without slots; aborts (with an error log)
    when no slot-less master exists to receive slots, otherwise delegates
    to perform_resharding().
    """
    logger.info("Started resharding")
    host, port = util.split_address(source)

    masters_with_slots = get_slot_distribution(host, port)
    logger.debug('Found %s master(s) in the cluster with slots',
                 len(masters_with_slots))
    masters_without_slots = get_master_without_slots(host, port)
    logger.debug('Found %s master(s) in the cluster without slots',
                 len(masters_without_slots))

    if not masters_without_slots:
        logger.error(
            'Cannot start resharding, since there are no masters where slots could be placed'
        )
        return

    logger.info('Performing resharding...')
    perform_resharding(masters_with_slots, masters_without_slots, source)
    logger.info('[√] Done resharding')
def extract_cluster_masters_with_slots(array_of_all_nodes):
    """Scan a flat token array for masters that own slots.

    Whenever the token at index ``i`` is an IP address, the surrounding
    tokens are read as ``(start_slot, end_slot, ip, port, node_id)`` from
    ``array_of_all_nodes[i-2 : i+3]``.  On a successful parse the scan
    skips ahead by 3; otherwise it advances by 1 and keeps looking.

    Fixes vs. the original: an IP token within two positions of either end
    of the array is now skipped.  Previously ``i + 1``/``i + 2`` could raise
    an uncaught IndexError (IndexError was not in the except tuple), and
    ``i - 2``/``i - 1`` silently wrapped to negative indices, reading
    unrelated tokens from the end of the array.
    """
    master_nodes = []
    i = 0
    total = len(array_of_all_nodes)
    while i < total:
        element = array_of_all_nodes[i]
        # bounds guard: a full record needs two tokens on each side of the IP
        if is_ip(element) and i >= 2 and i + 2 < total:
            try:
                master_node_to_add = MasterNode(
                    int(array_of_all_nodes[i - 2]),
                    int(array_of_all_nodes[i - 1]),
                    array_of_all_nodes[i],
                    int(array_of_all_nodes[i + 1]),
                    array_of_all_nodes[i + 2])
            except (TypeError, ValueError):
                # not actually a record boundary; resume the scan one token on
                i += 1
                continue
            logger.debug(master_node_to_add)
            master_nodes.append(master_node_to_add)
            i += 3
            continue
        i += 1
    return master_nodes
def trace_requirements(requirements):
    """given an iterable of pip InstallRequirements,
    return the set of required packages, given their transitive requirements.
    """
    requirements = tuple(pretty_req(r) for r in requirements)
    working_set = fresh_working_set()

    # breadth-first traversal:
    from collections import deque
    pending = deque(requirements)
    enqueued = {_package_req_to_pkg_resources_req(item.req) for item in pending}
    errors = []
    result = []
    while pending:
        current = pending.popleft()
        logger.debug('tracing: %s', current)
        try:
            dist = working_set.find_normalized(
                _package_req_to_pkg_resources_req(current.req))
        except pkg_resources.VersionConflict as conflict:
            dist = conflict.args[0]
            errors.append('Error: version conflict: {} ({}) <-> {}'.format(
                dist, timid_relpath(dist.location), current))
        assert dist is not None, 'Should be unreachable in pip8+'
        result.append(dist_to_req(dist))

        # TODO: pip does no validation of extras. should we?
        extras = [extra for extra in current.extras if extra in dist.extras]
        sub_reqs = sorted(dist.requires(extras=extras), key=lambda r: r.key)
        for raw_sub_req in sub_reqs:
            sub_req = InstallRequirement(raw_sub_req, current)
            if req_cycle(sub_req):
                logger.warning('Circular dependency! %s', sub_req)
            elif sub_req.req in enqueued:
                logger.debug('already queued: %s', sub_req)
            else:
                logger.debug('adding sub-requirement %s', sub_req)
                pending.append(sub_req)
                enqueued.add(sub_req.req)

    if errors:
        raise InstallationError('\n'.join(errors))
    return result
def trace_requirements(requirements):
    """given an iterable of pip InstallRequirements,
    return the set of required packages, given their transitive requirements.
    """
    from collections import deque

    requirements = tuple(pretty_req(r) for r in requirements)
    working_set = fresh_working_set()

    # breadth-first traversal:
    queue = deque(requirements)
    queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
    errors = []
    result = []
    while queue:
        req = queue.popleft()
        logger.debug('tracing: %s', req)

        dist = None
        try:
            dist = working_set.find_normalized(
                _package_req_to_pkg_resources_req(req.req))
        except pkg_resources.VersionConflict as conflict:
            # record the conflict but keep tracing from the installed dist
            dist = conflict.args[0]
            errors.append('Error: version conflict: {} ({}) <-> {}'.format(
                dist,
                timid_relpath(dist.location),
                req,
            ))
        assert dist is not None, 'Should be unreachable in pip8+'
        result.append(dist_to_req(dist))

        # TODO: pip does no validation of extras. should we?
        extras = [extra for extra in req.extras if extra in dist.extras]
        for dependency in sorted(dist.requires(extras=extras),
                                 key=lambda req: req.key):
            sub_req = InstallRequirement(dependency, req)
            if req_cycle(sub_req):
                logger.warning('Circular dependency! %s', sub_req)
                continue
            if sub_req.req in queued:
                logger.debug('already queued: %s', sub_req)
                continue
            logger.debug('adding sub-requirement %s', sub_req)
            queue.append(sub_req)
            queued.add(sub_req.req)

    if errors:
        raise InstallationError('\n'.join(errors))
    return result