Example #1
class ProxyPool:
    def __init__(self, minimal=10) -> None:
        self.proxies = []
        # use a lock when crawling with multiple threads
        self._lock = Lock()
        self.minimal = minimal

    def pick_proxy(self):
        # before picking a proxy, check that the pool has enough entries
        self._fill_pool()
        return random.choice(self.proxies)

    def remove_proxy(self, proxy):
        try:
            self.proxies.remove(proxy)
            print('Remove proxy:', proxy)
        except ValueError:
            print('Proxy has been removed!')

    def _fill_pool(self):
        # refresh the pool when fewer than `minimal` proxies remain
        if len(self.proxies) < self.minimal:
            # double-checked locking: avoid concurrent duplicate refreshes
            with self._lock:
                if len(self.proxies) < self.minimal:
                    self.proxies = get_proxies_retry()
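
A short usage sketch for the pool above; get_proxies_retry() here is a hypothetical stand-in for the original's proxy fetcher:

import random
from multiprocessing.dummy import Lock

def get_proxies_retry():
    # stand-in: a real version would fetch and validate fresh proxies
    return ['http://10.0.0.%d:8080' % i for i in range(1, 21)]

pool = ProxyPool(minimal=10)
proxy = pool.pick_proxy()    # triggers a refill on first use
pool.remove_proxy(proxy)     # discard a proxy once it fails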
Example #2
    def __init__(self, db_file_path='http_lib_cache.db'):
        self.compress_type = strRAW_PAGE
        self.db = shelve.open(str(db_file_path), writeback=True)

        self.db_lock = Lock()
        self.rollback()
        self.page_root_node = self.db
Example #3
 def __init__(self, target, threads):
     self.target = target
     self.ips = []
     self.time = time.time()
     self.threads = threads
     self.lock = Lock()
     self.get_ip_addr()
Example #4
class DATA_AND(object):
    def __init__(self, payloads):
        self.threads = conf['thread_num']
        self.payloads = payloads
        self.mutex = Lock()

    def run(self, payload):
        try:
            data = payload['payload']
            text = send_data(data)
            if not find_success(success_flag, text):
                with self.mutex:  # guard concurrent writes to the shared dict
                    payload['value'] = 0
        except Exception as e:
            print(e)

    def _start(self):
        try:
            pool = ThreadPool(processes=self.threads)
            pool.map_async(self.run, self.payloads).get(0xffff)
            pool.close()
            pool.join()
        except KeyboardInterrupt:
            print('[!] user quit!')
            sys.exit(1)
        except Exception as e:
            print(e)
Example #5
 def __init__(self):
     self.client_list = deque()
     self.db = DB()
     self.stats = {}
     self.pool = None
     self.lock = Lock()
     signal.signal(signal.SIGTERM, self.term_test)
     signal.signal(signal.SIGINT, self.term_test)
Example #6
    def __init__(self, target, startPort, endPort):
        self.target = target
        self.startPort = startPort
        self.endPort = endPort

        self.dnsRecords = []
        self.mutex = Lock()

        self.ports = []
        self.getPorts()
        self.time = time()
Example #7
class PixivAlbum(PixivItem):
    def __init__(self, illust_id, headers):
        PixivItem.__init__(self, illust_id, headers)
        self.albumURL = f'https://www.pixiv.net/member_illust.php?mode=manga&illust_id={illust_id}'
        self.session.headers['Referer'] = self.albumURL

    def getAlbum(self, path):
        logging.info(f'getting album {self.illust_id}...')

        # get title
        soup = self.getSoup(self.pageURL)
        logging.debug(f'page for {self.illust_id} (main page) get.')
        self.getTitleAndArtist(soup)

        # get pic urls
        picURLs = set()
        soup = self.getSoup(self.albumURL)
        imageContainers = soup.find_all('div', {'class': 'item-container'})
        for item in imageContainers:
            picURLs.add(item.img['data-src'])

        # mkdir
        thisPath = path + os.sep + self.title + ' - ' + self.artist
        if not os.path.exists(thisPath):
            os.mkdir(thisPath)

        self.count = 1
        self.lock = ThreadLock()

        def func(url):
            self.downloadImage(url, path)
            with self.lock:
                logging.debug(
                    f'({self.title} - {self.artist}) pic {self.count} done.')
                self.count += 1

        pool = ThreadPool(5)
        pool.map(func, picURLs)
        pool.close()
        pool.join()

        logging.info(
            f'album {self.title} - {self.artist} ({self.illust_id}) done.')

    def downloadImage(self, url, path):
        thisPath = path + os.sep + self.title + ' - ' + self.artist

        num = re.findall(r'p(\d+)', url)[0]
        imageName = thisPath + os.sep + num + '.' + url.split('.')[-1]
        imageName = imageName.replace('*', '※').replace('?', '？')
        self.downloadImageTo(url, imageName)
Example #8
class assassin:
    def __init__(self, ipfile, store_file, thread_num, port):
        self.ipfile = open(ipfile, "r")
        self.thread_num = thread_num
        self.port = port
        self.vuls = set()
        self.store_file = store_file
        self.lock = Lock()
        
    def _ips(self):
        for line in self.ipfile:
            line = line.strip()
            if len(line) == 0:
                continue
            for ip in ipaddress.ip_network(line, strict=False):
                yield '%s' % ip

    def run(self, host):
        vul = False
        try:
            sock = socket.socket()
            sock.settimeout(3)
            sock.connect((host, self.port))
            # raw RESP bytes for the redis INFO command: *1\r\n$4\r\ninfo\r\n
            payload = b'\x2a\x31\x0d\x0a\x24\x34\x0d\x0a\x69\x6e\x66\x6f\x0d\x0a'
            sock.send(payload)
            recvdata = sock.recv(1024)
            if recvdata and b'redis_version' in recvdata:
                self.lock.acquire()
                print('[+] maybe vul: %s' % host)
                self.vuls.add(host)
                self.lock.release()
                vul = True
            return vul
        except Exception:
            return vul
        finally:
            sock.close()
            
    def multi_assasin(self):
        start = time.time()
        ip_set = self._ips()
        pool = ThreadPool(self.thread_num)
        results = pool.map(self.run, ip_set)
        pool.close()
        pool.join()
        with open(self.store_file, "w") as f:
            for _ in self.vuls:
                f.write(_ + "\n")
        print "%s\ttotal vuln sites:%s \n used %s minutes." % (time.ctime(),\
                len(self.vuls), (time.time()-start)/ 60.0)
Example #9
 def __init__(self, target, start, end):
     self.target = target
     self.start = start
     self.end = end
     self.W = '\033[0m'
     self.G = '\033[1;32m'
     self.O = '\033[1;33m'
     self.R = '\033[1;31m'
     self.time = time()
     self.ports = []
     self.result = []
     self.mutex = Lock()
     self.get_ports()
Example #10
 def __init__(self, target, threads, custom_ports):
     self.W            = '\033[0m'
     self.G            = '\033[1;32m'
     self.O            = '\033[1;33m'
     self.R            = '\033[1;31m'
     self.custom_ports = custom_ports
     self.server       = target
     self.result       = []
     self.ips          = []
     self.time         = time.time()
     self.threads      = threads
     self.lock         = Lock()
     self.target       = self.handle_target()
     self.get_ip_addr()
Example #11
 def __init__(self, host, user, protocol, threads, ssl):
     self.W = '\033[0m'
     self.G = '\033[1;32m'
     self.O = '\033[1;33m'
     self.R = '\033[1;31m'
     self.time = time.time()
     self.host = host
     self.user = user
     self.proto = protocol
     self.uname = ''
     self.pwd = []
     self.result = []
     self.ssl = ssl
     self.thread = threads
     self.lock = Lock()
Example #12
    def __init__(self,
                 creator,
                 pool_size=5,
                 max_overflow=10,
                 timeout=30,
                 use_lifo=False,
                 **kw):
        """
        pool_size:
        Note that the pool begins with no connections; once this number of connections is requested, that number of connections will remain.
        pool_size can be set to 0 to indicate no size limit; to disable pooling, use a NullPool instead.

        max_overflow:
        When the number of checked-out connections reaches the size set in pool_size, additional connections will be returned up to this limit.
        When those additional connections are returned to the pool, they are disconnected and discarded.
        The total number of simultaneous connections the pool will allow is pool_size + max_overflow, and the total number of "sleeping" connections the pool will allow is pool_size.
        max_overflow can be set to -1 to indicate no overflow limit.

        kw:
        Other keyword arguments, including Pool.recycle, Pool.echo, Pool.reset_on_return and others, are passed to the Pool constructor.
        """
        Pool.__init__(self, creator, **kw)
        self._pool = sqla_queue.Queue(pool_size, use_lifo=use_lifo)
        self._overflow = -pool_size  # note: checked_out = pool_size + _overflow
        self._max_overflow = max_overflow
        self._timeout = timeout
        self._overflow_lock = Lock()
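
Initializing _overflow to -pool_size lets a single counter track both usage and overflow: checked_out = pool_size + _overflow, so the counter only turns positive once all pool_size base connections are out. A sketch (not SQLAlchemy's exact code) of the checkout guard this enables:

def _inc_overflow(self):
    # illustrative only: permit a checkout while under pool_size + max_overflow
    if self._max_overflow == -1:
        self._overflow += 1
        return True
    with self._overflow_lock:
        if self._overflow < self._max_overflow:
            self._overflow += 1
            return True
        return False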
Example #13
 def __init__(self,processes,maxiter,lb,ub,mp_pool,minstep,minfunc,debug,quiet,initial_best_guess=None):
     from multiprocessing.dummy import Lock
     self.xlog = {}
     self.fxlog = {}
     for i in range(processes):
         self.xlog[str(i)] = []
         self.fxlog[str(i)] = []
     if initial_best_guess is None:
         self.g = np.random.rand(len(lb))
         self.g = lb + self.g*(ub - lb) #position of global best - random at start.
         self.fg = np.inf #cost of global best
     else:
         self.g = np.array(initial_best_guess[0])
         self.fg = initial_best_guess[1]
     self.last_g = np.array(self.g)
     self.last_fg = self.fg
     self.lock = Lock()
     self.count = 0
     self.processes = processes
     self.maxiter = maxiter
     self.minstep = minstep
     self.minfunc = minfunc
     self.debug = debug
     self.maxcount = maxiter * processes
     self.end = False
     self.mp_pool = mp_pool
     self.quiet = quiet
     self.no_improvement_count = 0
Example #14
    def __init__(self, max_threads=1, max_browsers=0):

        super(TokenTops, self).__init__(max_threads, max_browsers)

        # should be 'firefox', 'chrome' or 'phantomjs'(headless)
        self.browser_name = 'firefox'

        # should be 'html5lib', 'lxml' or 'html.parser'
        self.html_parser = 'html5lib'

        self.mutex = Lock()

        self.NOT_FOUND_MSG = "From {}: could not find {}"

        # location of listings in website, may be more than one
        self.urls = [
            'https://www.tokentops.com/ico/?page=1',
            'https://www.tokentops.com/ico/?page=2',
            'https://www.tokentops.com/ico/?page=3',
            'https://www.tokentops.com/ico/?page=4',
            'https://www.tokentops.com/ico/?page=5',
            'https://www.tokentops.com/ico/?page=6',
            'https://www.tokentops.com/ico/?page=7',
            'https://www.tokentops.com/ico/?page=8',
            'https://www.tokentops.com/ico/?page=9',
            'https://www.tokentops.com/ico/?page=10',
            'https://www.tokentops.com/ico/?page=11',
            'https://www.tokentops.com/ico/?page=12',
            'https://www.tokentops.com/ico/?page=13'
        ]

        self.domain = 'https://www.tokentops.com/'
Example #15
 def __init__(self, ipfile, store_file, thread_num, port):
     self.ipfile = open(ipfile, "r")
     self.thread_num = thread_num
     self.port = port
     self.vuls = set()
     self.store_file = store_file
     self.lock = Lock()
Example #16
class Bank(object):
    tie_lock = Lock()

    @staticmethod
    def __get_hash(obj):
        return id(obj)  # hash_func(obj)

    @classmethod
    def transfer(cls, p_from, p_to, money):
        """p_from: sender, p_to: recipient, money: amount"""
        from_hash = Bank.__get_hash(p_from)
        to_hash = Bank.__get_hash(p_to)

        # rule: always lock the account with the larger hash first
        if from_hash > to_hash:
            with p_from.lock:
                p_from.money -= money
                sleep(1)  # simulate network latency
                with p_to.lock:
                    p_to.money += money
        elif from_hash < to_hash:
            with p_to.lock:
                p_to.money += money
                sleep(1)  # simulate network latency
                with p_from.lock:
                    p_from.money -= money
        # handle a hash collision (very unlikely)
        else:
            # on a tie, contend for a shared tie-breaker lock;
            # whoever wins it transfers first
            with Bank.tie_lock:
                with p_from.lock:
                    p_from.money -= money
                    sleep(1)  # simulate network latency
                    with p_to.lock:
                        p_to.money += money
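
A minimal runnable harness for the ordering trick above, with a hypothetical account class; the two opposing transfers below would risk deadlock under naive lock ordering, but here both threads lock the larger hash first:

from threading import Lock, Thread

class Person:
    def __init__(self, money):
        self.money = money
        self.lock = Lock()

a, b = Person(100), Person(100)
t1 = Thread(target=Bank.transfer, args=(a, b, 10))
t2 = Thread(target=Bank.transfer, args=(b, a, 20))
t1.start(); t2.start()
t1.join(); t2.join()
print(a.money, b.money)  # 110 90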
Example #17
def find_process_files(root_dir):
    lock = Lock()
    pool = Pool()

    hash_db = load_hashes(HASH_FILE)
    # Keep changed .pxi hashes in a separate dict until the end
    # because if we update hash_db and multiple files include the same
    # .pxi file the changes won't be detected.
    pxi_hashes = {}

    jobs = []

    for cur_dir, dirs, files in os.walk(root_dir):
        for filename in files:
            in_file = os.path.join(cur_dir, filename + ".in")
            if filename.endswith('.pyx') and os.path.isfile(in_file):
                continue
            for fromext, function in rules.items():
                if filename.endswith(fromext):
                    toext = ".c"
                    with open(os.path.join(cur_dir, filename), 'rb') as f:
                        data = f.read()
                        m = re.search(br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I|re.M)
                        if m:
                            toext = ".cxx"
                    fromfile = filename
                    tofile = filename[:-len(fromext)] + toext
                    jobs.append((cur_dir, fromfile, tofile, function, hash_db, pxi_hashes, lock))

    for result in pool.imap(lambda args: process(*args), jobs):
        pass

    hash_db.update(pxi_hashes)
    save_hashes(hash_db, HASH_FILE)
Example #18
def find_process_files(root_dir):
    lock = Lock()

    try:
        num_proc = int(os.environ.get('SCIPY_NUM_CYTHONIZE_JOBS', cpu_count()))
        pool = Pool(processes=num_proc)
    except ImportError as e:
        # Allow building (single-threaded) on GNU/Hurd, which does not
        # support semaphores so Pool cannot initialize.
        pool = type('', (), {
            'imap_unordered':
            lambda self, func, iterable: map(func, iterable)
        })()
    except ValueError:
        pool = Pool()

    hash_db = load_hashes(HASH_FILE)
    # Keep changed pxi/pxd hashes in a separate dict until the end
    # because if we update hash_db and multiple files include the same
    # .pxi file the changes won't be detected.
    dep_hashes = {}

    # Run any _generate_pyx.py scripts
    jobs = []
    for cur_dir, dirs, files in os.walk(root_dir):
        generate_pyx = os.path.join(cur_dir, '_generate_pyx.py')
        if os.path.exists(generate_pyx):
            jobs.append(generate_pyx)

    for result in pool.imap_unordered(
            lambda fn: process_generate_pyx(fn, lock), jobs):
        pass

    # Process pyx files
    jobs = []
    for cur_dir, dirs, files in os.walk(root_dir):
        for filename in files:
            in_file = os.path.join(cur_dir, filename + ".in")
            if filename.endswith('.pyx') and os.path.isfile(in_file):
                continue
            for fromext, function in rules.items():
                if filename.endswith(fromext):
                    toext = ".c"
                    with open(os.path.join(cur_dir, filename), 'rb') as f:
                        data = f.read()
                        m = re.search(
                            br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$",
                            data, re.I | re.M)
                        if m:
                            toext = ".cxx"
                    fromfile = filename
                    tofile = filename[:-len(fromext)] + toext
                    jobs.append((cur_dir, fromfile, tofile, function, hash_db,
                                 dep_hashes, lock))

    for result in pool.imap_unordered(lambda args: process(*args), jobs):
        pass

    hash_db.update(dep_hashes)
    save_hashes(hash_db, HASH_FILE)
Example #19
 def __init__(self, transfermanager, session_count, files_dir='', coo=None):
     """
     Author: David
     Desc:
             maxs defines how much sessions run concurrently.
             curs defines how much sessions are running now.
             wq is for storing delayed jobs due to given maxs.
     """
     self._tm = transfermanager
     self._maxs = session_count
     self._curs = 0
     self._files_dir = files_dir
     self._wq = []
     self._pl = Pool()
     self._lock = Lock()
     self._coo = coo
Example #20
def main():
    print(f"[还钱前]小明{xiaoming},小张{xiaozhang},小周{xiaozhou}")
    lock = Lock()
    p = ThreadPool()
    p.apply_async(test, args=(lock, ))
    p.close()
    p.join()
    print(f"[还钱后]小明{xiaoming},小张{xiaozhang},小周{xiaozhou}")
Example #21
def main():
    lock = Lock()
    p_list = [threading.Thread(target=test, args=(i, lock)) for i in range(5)]
    for i in p_list:
        i.start()
    for i in p_list:
        i.join()
    print(num)
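
num and test are likewise not shown; the usual demo has each thread increment a shared counter under the lock so the final print is deterministic:

num = 0  # hypothetical shared counter

def test(i, lock):
    global num
    for _ in range(100000):
        with lock:   # without the lock, num += 1 can lose updates
            num += 1

With five threads this prints 500000.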
Example #22
 def __init__(self, maxCount, maxLength=50, printCount=True, printPercentage=True, printTime=False):
     self.lock = ThreadLock()
     self.maxCount = maxCount
     self.currentCount = 0
     self.maxLength = maxLength
     self.printCount = printCount
     self.printPercentage = printPercentage
     self.printTime = printTime
     self.startTime = time.time()
Example #23
def singleton(cls):
    _instance = {}
    lock = Lock()
    def _singleton(*args, **kwargs):
        with lock:  # thread-safe singleton
            if cls not in _instance:
                _instance[cls] = cls(*args, **kwargs)
        return _instance[cls]
    return _singleton
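
Usage sketch: decorating a class makes every call return one shared instance, even under concurrent construction:

@singleton
class Config:
    def __init__(self):
        self.loaded = True

assert Config() is Config()  # same object every time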
Example #24
 def file_info(self, filename, writable=False):
     with self.open_lock:
         if filename in self.open_files:
             return self.open_files[filename]
         mode = {True: 'wb', False: 'rb'}[writable]
         file_handle = open(filename, mode)
         write_lock = Lock()
         self.open_files[filename] = dict(file_handle=file_handle,
                                          write_lock=write_lock)
     return self.file_info(filename, writable)
Example #25
 def __init__(self,
              input_files,
              ncpus=2,
              binary_name='fast',
              stream=sys.stdout):
     self.input_files = input_files
     self.ncpus = ncpus
     self.binary_name = binary_name
     self.stream = stream
     self.lock = Lock()
Example #26
def liveTotal():
    cacheLock = Lock()
    liveQuoteAtomic = partial(liveQuote, lock=cacheLock)
    threadPool = Pool(5)
    dailyTotals = threadPool.map(liveQuoteAtomic,STOCKS.keys())
    threadPool.close()
    threadPool.join()
    balance = int(sum(dt[0] for dt in dailyTotals))
    totalDayChange = int(sum(dt[1] for dt in dailyTotals))
    Result = collections.namedtuple("Result", ["balance", "dayChange"])
    return Result(balance, totalDayChange)
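
liveQuote and STOCKS live elsewhere; a hypothetical stand-in matching the call shape (takes a symbol plus the shared lock, returns a (value, dayChange) pair):

STOCKS = {'ACME': 10, 'INIT': 5}  # hypothetical symbol -> share count

def liveQuote(symbol, lock):
    price, change = 100.0, 1.5  # a real version would fetch live quotes
    with lock:
        pass  # guard writes to any shared cache here, per the partial()
    return (price * STOCKS[symbol], change * STOCKS[symbol])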
Example #27
def main():
    resource = 5  # 5 chopsticks, 5 philosophers
    locks = [Lock() for i in range(resource)]  # one lock per resource

    p = ThreadPool(resource)  # a pool of 5 threads (default is the CPU core count)
    for i in range(resource):
        # grab the left chopstick (locks[i]) and the right chopstick (locks[(i + 1) % resource])
        # e.g. i=0 -> chopsticks 0 and 1; i=4 -> chopsticks 4 and 0
        p.apply_async(eat, args=(locks[i], locks[(i + 1) % resource]))
    p.close()
    p.join()
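
eat is not shown; a minimal version matching the lock-pair signature. Note this naive grab-left-then-right ordering can still deadlock if all five philosophers take their left chopstick at once:

import time

def eat(left_lock, right_lock):
    with left_lock:       # pick up the left chopstick
        time.sleep(0.1)   # think a bit, raising the chance of contention
        with right_lock:  # then the right one
            print('eating')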
Example #28
def singleton_pool(cls):
    _instance_pool = []
    lock = Lock()
    def _singleton(*args, **kwargs):
        with lock:  # thread-safe singleton pool
            for _args, _kwargs, _instance in _instance_pool:
                if (_args, _kwargs) == (args, kwargs):
                    return _instance
            _instance = cls(*args, **kwargs)
            _instance_pool.append((args, kwargs, _instance))
            return _instance
    return _singleton
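
Usage sketch: unlike the plain singleton above, the pool keys instances on constructor arguments, so equal arguments share an instance and different ones do not:

@singleton_pool
class Conn:
    def __init__(self, host):
        self.host = host

assert Conn('a') is Conn('a')
assert Conn('a') is not Conn('b')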
Example #29
def parallel_scan(app_config, user_id, media_files):
    """
    Runs the media scan on demand: either at first launch (when there is no database yet)
    or on a re-scan (as an authorized user, navigate to /settings and click the
    'Scan Media Files' button).
    To speed up the scan, up to 32 threads may be used for this task.
    During the scan process files metadata is retrieved from files and is registered in the DB.

    :param app_config: a dictionary containing the application configuration settings (=app.config).
    :param user_id: an integer number of user id which will be considered as owner (0 for public).
    :param media_files: a list of strings - absolute paths of media files to be processed.
    :return: True.
    """
    passed, lock_passed = Value('i', 0), Lock()
    failed, lock_failed = Value('i', 0), Lock()
    args = [(app_config, user_id, path, passed, lock_passed, failed, lock_failed)
            for path in media_files]
    pool = ThreadPool(2)
    pool.starmap(single_scan, args)
    pool.close()
    pool.join()
    return True
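
single_scan is not shown; a hedged sketch of a worker matching the starmap() tuples, bumping the shared Value counters under their locks:

def single_scan(app_config, user_id, path, passed, lock_passed,
                failed, lock_failed):
    try:
        # ... extract metadata from `path` and register it in the DB ...
        with lock_passed:
            passed.value += 1
    except Exception:
        with lock_failed:
            failed.value += 1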
Example #30
    def __init__(self, req, url, dirname, filename, block_size=BLOCK_SIZE):
        '''
        req: session object
        url: download request URL
        dirname: download directory name
        filename: download file name
        '''

        self.req = req
        self.url = url
        self.dir_name = dirname
        self.file_name = filename
        self.block_size = block_size
        self.lock = Lock()
        self.block_done = 0
        self.length = self.get_length()
        self.block_num = self.get_blocknum()
        self.pool = self.get_pool()
        self.progress_show()
        self.status = 'LOAD'
        self.ranges_undo = []
        self.ranges_done = []
Example #31
def do_pool_commands(cmd, lock=Lock(), shell_var=True):
    '''run pool commands'''
    try:
        output = subprocess.Popen(cmd, shell=shell_var, \
                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output_stdout, output_stderr = output.communicate()
        with lock:
            print('running: {}'.format(cmd))
            print(output_stdout)
            print(output_stderr)
    except BaseException:
        print("command failed {}".format(cmd))
    return output.wait()
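
Typical usage maps the helper over a thread pool with one shared lock, so each command's output prints as an uninterleaved block; a sketch:

from multiprocessing.dummy import Pool, Lock
from functools import partial

lock = Lock()
cmds = ['echo one', 'echo two']
pool = Pool(2)
exit_codes = pool.map(partial(do_pool_commands, lock=lock), cmds)
pool.close()
pool.join()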
Example #32
 def __init__(self, config=None, config_file_path=None):
     self.file_count = 0
     self.status_counts = {}
     self.urls = {}  # URLs checked so far, with their broken status and status code
     self.domains = {}  # URL domains and when each was last requested (429 code)
     self.unchecked_urls = set()  # Initial set of urls to be checked
     self.delayed_urls = []  # List of urls to be checked later (429 code)
     self.directory = '.'
     self.pool = ThreadPool(THREADS)
     self.lock = Lock()
     if not config and config_file_path:
         logging.info(
             'Using Linkie configuration file {}'.format(config_file_path))
         config = self.read_config(config_file_path)
     elif config:
         logging.info('Using custom Linkie settings via Python constructor')
     elif not config and not config_file_path:
          logging.info('Using default Linkie configuration')
     config = self.check_config(config)
     self.config = self.process_config(config)
Example #33
  def __init__(self, release_candidate_id, chef_api=None,
      deploy_user='******'):
    self.chef_api = chef_api
    self.chef_app_prefs = None
    self.deploy_user = deploy_user  # TODO: get this from the project model

    # turn app:app_rev into tarball url
    self.rc = release_candidate.get_by_id(release_candidate_id)
    self.project = project.get_by_id(self.rc['project_id'])
    self.lock = Lock()
    self.nodes = []
    self.code_dir = None
    self.venv_dir = None
Example #34
class Deploy(object):
  """Manage data and actions relevant to deploying code
  """

  def __init__(self, release_candidate_id, chef_api=None,
      deploy_user='******'):
    self.chef_api = chef_api
    self.chef_app_prefs = None
    self.deploy_user = deploy_user  # TODO: get this from the project model

    # turn app:app_rev into tarball url
    self.rc = release_candidate.get_by_id(release_candidate_id)
    self.project = project.get_by_id(self.rc['project_id'])
    self.lock = Lock()
    self.nodes = []
    self.code_dir = None
    self.venv_dir = None

  @property
  def available_roles(self):
    """Get the list of available roles for this application. Roles are optional
    and the lack of them should return an empty list in which case every node
    can be deployed to. Throws if the app config hasn't been loaded from chef.
    """
    return chef_role.mget_by_project(self.project['project_id'])

  def deploy(self, env=None, roles=None, run_pre_activate_scripts=False):
    """Perform a deploy.
    """
    # does this app_rev exist for this app?
    #TODO: should we raise here?
    self.code_dir = get_toplevel_dir_from_tarball(
        self.rc['code_tarball_location'])
    self.venv_dir = get_toplevel_dir_from_tarball(
        self.rc['venv_tarball_location'])
    if not self.code_dir or not self.venv_dir:
      return None

    #dn = chef.Node('dev-monitor-001', api=self.chef_api)

    # Are the roles valid?
    target_role_names = set([r['chef_role_name'] for r in roles])
    available_role_names = set([r['chef_role_name']
      for r in self.available_roles])
    if (target_role_names | available_role_names) != available_role_names:
      raise RuntimeError("Attempting to deploy to invalid role: %s" % (
        target_role_names - available_role_names))

    self.load_node_list(roles, env)
    # can we deploy app to these nodes?
    if not self.nodes:
      return None

    # TODO: Upload static assets
    #if 'www' in roles:
      #self.upload_static_assets_to_cdn()

    # transfer tarballs to servers
    self._transfer()

    # Run pre-activate hook script on role migration
    # TODO: Make sure the script exists
    migration_tag = 'gus_migration'
    chef_query = "chef_environment:%s AND tags:%s" % (env['environment_name'],
        migration_tag)
    migration_node = self.chef_api.Search('node', chef_query)
    box = "%s@%s" % (self.deploy_user,
        migration_node['automatic']['ipaddress_eth1'])
    script = os.path.join(self.code_dir, self.project['pre_activate_hook'])
    run_as_subprocess('/usr/bin/ssh', box, '"%s"' % script, self.venv_dir)

    # activate on all machines
    self._activate()

    # TODO: Notify Hipchat

    # TODO: Ping statsd, if applicable

    # TODO: kick off smoke tests, if applicable

  def _transfer(self):
    '''Use a pool of workers to push the tarball out to the specified nodes
    '''
    pool = Pool(min(len(self.nodes), MAX_NUM_WORKERS))
    res = pool.map(self._transfer_to_node, self.nodes)
    return res

  def _transfer_to_node(self, node):
    # Logging doesn't play nicely with multi-processing, so lock before
    # logging.
    with self.lock:
      log.info("Transferring tarballs to node: %s", node)
    box = "%s@%s" % (self.deploy_user, node['automatic']['ipaddress_eth1'])

    # Deploy and untar code
    _push_and_untar(box, self.rc['code_tarball_location'],
        self.project['code_target_dir'])

    # Deploy and untar venv
    _push_and_untar(box, self.rc['venv_tarball_location'],
        self.project['venv_target_dir'])

    # Load the application into the virtual env
    venv_activate = "source %s/bin/activate" % self.project['venv_target_dir']
    pip_install = "pip install -e %s" % self.project['code_target_dir']
    run_as_subprocess('/usr/bin/ssh', box,
        '"%s && %s"' % (venv_activate, pip_install))

  def _activate(self):
    pool = Pool(min(len(self.nodes), MAX_NUM_WORKERS))
    res = pool.map(self._activate_node, self.nodes)
    return res

  def _activate_node(self, node):
    box = "%s@%s" % (self.deploy_user, node['automatic']['ipaddress_eth1'])
    # Cut the code & venv symlinks over
    run_as_subprocess('/usr/bin/ssh', box,
        "'unlink /opt/%(pn)s/app && ln -s %(dir)s /opt/%(pn)s/app'"
        % {'pn': self.project['project_name'], 'dir': self.code_dir})
    run_as_subprocess('/usr/bin/ssh', box,
        "'unlink /opt/%(pn)s/app_virtual_env && " \
        "ln -s %(dir)s /opt/%(pn)s/app_virtual_env'"
        % {'pn': self.project['project_name'], 'dir': self.venv_dir})

    # Restart uwsgi
    raw_status = subprocess.check_output(['/usr/bin/ssh', box,
        'supervisorctl status'])
    process_list = raw_status.decode('utf8').split("\n")
    for row in process_list:
      if '%s:uwsgi' % self.project['project_name'] in row:
        match = re.search(r'pid (\d+)', row)
        if match:
          pid = match.group(1)
          run_as_subprocess('/usr/bin/ssh', box, "sudo kill -HUP %s" % pid)

    # Start all supervisor tasks <project>:task:*
    run_as_subprocess('/usr/bin/ssh', box,
        '"supervisorctl restart %s:task:*"' % self.project['project_name'])
    # TODO: run status again to check for errors
    return

  def load_node_list(self, roles, env):
    # Turn role list into node list
    role_clause = ' OR '.join(['role:%s' % r['chef_role_name'] for r in roles])
    chef_query = "chef_environment:%s AND (%s)" % (env['environment_name'],
        role_clause)
    self.nodes = self.chef_api.Search('node', chef_query)

  def _upload_static_assets_to_cdn(self):
    raise NotImplementedError