Example #1
def encrypt_all_keys_in_prefix(bucket, prefix, encrypt_out, strip_string,
                               use_pywren):
    keys = utils.list_all_keys(prefix)
    print(keys)
    if (use_pywren):
        chunked_keys = utils.chunks(keys, 500)

        def pywren_job(key_chunk):
            for key in key_chunk:
                utils.encrypt_s3_copy_key(key, bucket, encrypt_out,
                                          strip_string)
            return 0

        config = wc.default()
        config['runtime']['s3_bucket'] = 'imagenet2datav2'
        config['runtime'][
            's3_key'] = 'pywren.runtime/pywren_runtime-3.6-imagenet2.tar.gz'
        pwex = pywren.default_executor(config=config)
        print(f"Submitting jobs for {len(keys)} keys")
        futures = pwex.map(pywren_job,
                           chunked_keys,
                           exclude_modules=["site-packages/"])
        pywren.wait(futures)
        [f.result() for f in futures]
    else:
        for key in keys:
            utils.encrypt_s3_copy_key(key, bucket, encrypt_out, strip_string)
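
The chunking above amortizes per-invocation overhead: each Lambda processes a batch of keys instead of one. utils.chunks is not shown in this snippet; a minimal stand-in (hypothetical, for illustration only) could look like this:

def chunks(seq, size):
    # Split seq into consecutive batches of at most `size` elements.
    for i in range(0, len(seq), size):
        yield seq[i:i + size]

# list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]
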
Example #2
    def reduce(self,
               function,
               list_of_futures,
               extra_env=None,
               extra_meta=None):
        """
        Apply a function across all futures.

        # FIXME change to lazy iterator
        """
        #if self.invoker.TIME_LIMIT:
        wait(list_of_futures,
             return_when=ALL_COMPLETED)  # avoid race condition

        def reduce_func(fut_list):
            # FIXME speed this up for big reduce
            accum_list = []
            for f in fut_list:
                accum_list.append(f.result())
            return function(accum_list)

        return self.call_async(reduce_func,
                               list_of_futures,
                               extra_env=extra_env,
                               extra_meta=extra_meta)
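
A hedged usage sketch of this reduce: the reducer runs remotely via call_async and receives the list of all map results (the executor name wrenexec and the squaring task below are assumptions for illustration):

futures = wrenexec.map(lambda x: x * x, range(10))
# reduce_func gathers every f.result() into a list, then applies sum to it.
total_future = wrenexec.reduce(sum, futures)
print(total_future.result())  # 285
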
Example #3
File: array.py  Project: bebatut/zappy
    def map(self, func, iterables):
        import pywren

        futures = self.pywren_executor.map(
            func, iterables, exclude_modules=self.exclude_modules
        )
        pywren.wait(futures, return_when=pywren.ALL_COMPLETED)
        # Set throw_except to False, since when writing Zarr we don't return anything.
        # However, Pywren should be improved to distinguish between no return value and an exception.
        results = [f.result(throw_except=False) for f in futures]
        if self.record_job_history:
            run_statuses = [f.run_status for f in futures]
            invoke_statuses = [f.invoke_status for f in futures]
            outdict = {
                "futures": futures,
                "run_statuses": run_statuses,
                "invoke_statuses": invoke_statuses,
            }
            logs_dir = os.path.expanduser("~/.zappy/logs")
            os.makedirs(logs_dir, exist_ok=True)
            timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S.%f")
            filename = os.path.join(logs_dir, "pywren-{}.pickle".format(timestamp))
            with open(filename, "wb") as file:
                pickle.dump(outdict, file)
        return results
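
The pickled job history can be inspected offline. A minimal sketch for loading the most recent log, assuming the same ~/.zappy/logs layout (unpickling the stored futures requires pywren to be importable):

import glob
import os
import pickle

log_files = sorted(glob.glob(os.path.expanduser("~/.zappy/logs/pywren-*.pickle")))
with open(log_files[-1], "rb") as f:
    outdict = pickle.load(f)
print(len(outdict["run_statuses"]), "run statuses recorded")
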
Example #4
def test_cholesky_lambda():
    X = np.random.randn(128, 128)
    A = X.dot(X.T) + np.eye(X.shape[0])
    shard_size = 128
    shard_sizes = (shard_size, shard_size)
    A_sharded = BigMatrix("job_runner_test",
                          shape=A.shape,
                          shard_sizes=shard_sizes,
                          write_header=True)
    A_sharded.free()
    shard_matrix(A_sharded, A)
    program, meta = cholesky(A_sharded)
    executor = fs.ProcessPoolExecutor(1)
    print("starting program")
    program.start()
    pwex = pywren.default_executor()
    futures = pwex.map(
        lambda x: job_runner.lambdapack_run(
            program, timeout=60, idle_timeout=6), range(16))
    pywren.wait(futures)
    print("RESULTSSS")
    print([f.result() for f in futures])
    futures = pwex.map(
        lambda x: job_runner.lambdapack_run(
            program, timeout=60, idle_timeout=6), range(16))
    program.wait()
    #program.free()
    L_sharded = meta["outputs"][0]
    L_npw = L_sharded.numpy()
    L = np.linalg.cholesky(A)
    assert (np.allclose(L_npw, L))
    print("great success!")
Example #5
def gemm_recompute(A, B, thresh, s3_key):
    """    
    Compute A * B.T via speculative execution (i.e., recompute straggling workers).

    Params
    ======
    A : numpywren.matrix.BigMatrix
        First input matrix.
        
    B : numpywren.matrix.BigMatrix
        Second input matrix.
        
    thresh : float (in [0, 1])
        Fraction of workers that should finish before recomputing.
        
    s3_key : str
        Storage key for output matrix.

    Returns
    =======
    C : matrix.BigMatrix
        Resultant matrix product.
        
    t_comp : float
        Time for thresh percentage of the workers to finish.
        
    t_straggle : float
        Time for the remaining 1 - thresh percentage of the workers to finish after
        we begin recomputing.
    """
    if not (0 <= thresh <= 1):
        raise ValueError("thresh must be in the interval [0, 1]")
        
    """Initialize output matrix"""
    num_col_blocks = A.shape[1] // A.shard_sizes[1]
    shard_sizes = (A.shard_sizes[0], B.shard_sizes[0])
    C = matrix.BigMatrix(s3_key, shape=(A.shape[0], B.shape[0]), shard_sizes=shard_sizes, autosqueeze=False, write_header=True)
    C.delete() # Only needed if you reuse the same s3_key (if the blocks already exist, no work will be done here)

    """Stage 1: Compute "thresh" percentage of the results"""
    t_comp_start = time.time()
    pwex = pywren.lambda_executor()
    futures = pwex.map(lambda x: pywren_gemm(x, A, B, C, num_col_blocks), C.block_idxs)
    num_done = 0
    while num_done < thresh * len(futures):
        fs_dones, _ = pywren.wait(futures, return_when=ANY_COMPLETED)
        num_done = len(fs_dones)
    t_comp = time.time() - t_comp_start # Total stage 1 time

    """Stage 2: Recompute straggling workers (the last 1-thresh percent of jobs)"""
    t_straggle_start = time.time()
    futures_stragglers = pwex.map(lambda x: pywren_gemm(x, A, B, C, num_col_blocks), C.block_idxs_not_exist)
    while len(C.block_idxs_not_exist) > 0: 
        pywren.wait(futures, return_when=ALWAYS)
        pywren.wait(futures_stragglers, return_when=ALWAYS)
    t_straggle = time.time() - t_straggle_start # Total stage 2 time
    
    return C, t_comp, t_straggle
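
The two-stage pattern above generalizes to any "wait for a fraction, then handle stragglers" workflow: poll with ANY_COMPLETED until enough futures finish, then act on the rest. A minimal sketch of that core loop (assuming futures came from a pywren map and 0 <= thresh <= 1):

import pywren

def wait_for_fraction(futures, thresh):
    # Block until at least thresh * len(futures) futures have completed,
    # then return the completed ones; the caller handles the stragglers.
    fs_dones = []
    while len(fs_dones) < thresh * len(futures):
        fs_dones, _ = pywren.wait(futures, return_when=pywren.ANY_COMPLETED)
    return fs_dones
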
Example #6
def batch_candidates(candidates,
                     batch_size,
                     prefix='imagenet2candidates_batches',
                     bucket='imagenet2datav2',
                     verbose=True):
    import pywren
    from pywren import wrenconfig as wc

    def hash_ids(ids):
        return hashlib.sha256((','.join(ids)).encode()).hexdigest()

    def create_and_store_batch(batch_ids):
        batch_key = hash_ids(batch_ids)
        full_key = os.path.join(prefix, batch_key) + '.pickle'
        data = {}
        for cur_id in batch_ids:
            cur_key = 'imagenet2candidates_scaled/' + cur_id + '.jpg'
            data[cur_id], _ = utils.get_s3_object_bytes_with_backoff(cur_key, bucket=bucket)
        client = utils.get_s3_client()
        client.put_object(Key=full_key, Bucket=bucket, Body=pickle.dumps(data))
        return (batch_key, batch_ids)
    candidates = candidates.copy()
    cids = [c['id_ours'] for c in candidates]
    batches = list(utils.chunks(cids, batch_size))
    num_batches = len(batches)
    if verbose:
        print('Creating {} batches of size {} ...'.format(num_batches, batch_size))
    
    pywren_config = wc.default()
    pywren_config['runtime']['s3_bucket'] = 'imagenet2pywren'
    pywren_config['runtime']['s3_key'] = 'pywren.runtime/pywren_runtime-3.6-imagenet2pywren.tar.gz'
    pwex = pywren.default_executor(config=pywren_config)
    print(f"Mapping over {len(batches)} images")
    futures = pwex.map(create_and_store_batch,
                       batches,
                       exclude_modules=["site-packages"])
    ALWAYS = 3
    done, not_done = pywren.wait(futures, ALWAYS)
    while len(not_done) > 0:
        done, not_done = pywren.wait(futures, ALWAYS)
        time.sleep(1)
    print('done')
    result = {}
    for res in done:
        actual_res = res.result()
        for cid in actual_res[1]:
            result[cid] = actual_res[0]
    print(len(result))
    for cand in candidates:
        assert cand['id_ours'] in result
        cand['batch'] = result[cand['id_ours']]
    return candidates
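
Hardcoding ALWAYS = 3 mirrors pywren's internal constant for "return immediately with the current done/not-done split"; using the named constant is safer. A hedged equivalent of the polling loop above (futures from a prior pywren map):

import time
import pywren

done, not_done = pywren.wait(futures, pywren.ALWAYS)
while len(not_done) > 0:
    done, not_done = pywren.wait(futures, pywren.ALWAYS)
    time.sleep(1)
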
Example #7
def main():
    initialize_db()
    wren_executor = pywren.default_executor()

    start_time = time.time()
    with open(ARTICLES_FILE_PATH, encoding=ENCODING) as articles_file:
        data = json.loads(articles_file.read())
        futures = wren_executor.map(create_nodes, data['titles'])
        pywren.wait(futures)
        futures = wren_executor.map(create_edges, data['titles'])
        pywren.wait(futures)
    end_time = time.time()

    print('Duration: {}'.format(end_time - start_time))
Example #8
def argmin(pwex, X, out_bucket=None, tasks_per_job=1, axis=None):
    futures = pwex.map(lambda x: _argmin_remote(x, X), X.block_idxs)
    pywren.wait(futures)
    results = [f.result() for f in futures]
    if (axis == None):
        groups = [(None, results)]
    else:
        groups = itertools.groupby(sorted(results, key=itemgetter(axis)), key=itemgetter(0))
    results = []
    for _, group in groups:
        group = list(group)
        argmins = np.concatenate([g[1] for g in group], axis=axis)
        argminmin = np.argmin(np.vstack([g[2] for g in group]), axis=axis)
        results.append(argmins[argminmin, :])
    return np.hstack(results)
Example #9
    def test_map_doublewait(self):
        """
        Make sure we can call wait on a list of futures twice
        """
        def plus_one(x):
            return x + 1
        N = 10

        x = np.arange(N)
        futures = self.wrenexec.map(plus_one, x)
        pywren.wait(futures)
        pywren.wait(futures)

        res = np.array([f.result() for f in futures])
        np.testing.assert_array_equal(res, x + 1)
Example #10
def reshard_down(bigm, breakdowns, pwex=None):
    ''' Return a new BigMatrix whose shard sizes are bigm.shard_sizes / breakdowns.
        If a pwex is provided, reshards in parallel; otherwise reshards locally (very slow).
        Each block in bigm is broken into several evenly sized sub-blocks; breakdowns is a
        list of integers giving the factor to split each dimension by. breakdowns = [2, 2]
        splits each dimension in two, so a 4 x 4 block is replaced by four 2 x 2 blocks.
    '''

    for x,y in zip(bigm.shard_sizes, breakdowns):
        assert x % y == 0

    new_shard_sizes = [int(x/y) for x,y in zip(bigm.shard_sizes, breakdowns)]

    X_sharded_new = BigMatrix("reshard({0},{1})".format(bigm.key, breakdowns), bucket=bigm.bucket, shape=bigm.shape, shard_sizes=new_shard_sizes)

    chunked_idxs = []
    chunked_absolute_idxs = []
    for i in range(len(bigm.shape)):
        chunked_idxs.append([tuple(x) for x in matrix_utils.chunk(X_sharded_new._block_idxs(i), breakdowns[i])])
        chunked_absolute_idxs.append([tuple(x) for x in matrix_utils.chunk(X_sharded_new._blocks(i), breakdowns[i])])


    idxs = [bigm._block_idxs(i) for  i in range(len(bigm.shape))]
    all_idxs_new = list(itertools.product(*chunked_idxs))
    all_idxs_old = list(itertools.product(*idxs))
    all_idxs_new_absolute = list(itertools.product(*chunked_absolute_idxs))
    idx_info = list(zip(all_idxs_new, all_idxs_old, all_idxs_new_absolute))


    def reshard_func(bidx_info, bigm, bigm_new):
        idxs_new, idx_old, idx_absolute = bidx_info
        data = bigm.get_block(*idx_old)
        logical = list(itertools.product(*idxs_new))
        absolute = list(itertools.product(*idx_absolute)) 
        offsets = [x[0][0] for x in idx_absolute]
        for lidx, aidx in zip(logical, absolute):
            aidx_offsets = [ slice(x[0] - ox, x[1] - ox)  for x,ox in zip(aidx, offsets)]
            sub_data = data.__getitem__(aidx_offsets)
            print(lidx, aidx, idx_old)
            bigm_new.put_block(sub_data, *lidx)

    if (pwex is None):
        [reshard_func(x, bigm, X_sharded_new) for x in idx_info]
    else:

        futures = pwex.map(lambda x: reshard_func(x, bigm, X_sharded_new), idx_info)
        pywren.wait(futures)
        [f.result() for f in futures]

    return X_sharded_new
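
A usage sketch (numpywren-style; assumes X is an existing BigMatrix whose shard sizes are divisible by the breakdown factors):

import pywren

pwex = pywren.default_executor()
X_fine = reshard_down(X, [2, 2], pwex=pwex)  # each block split into four sub-blocks
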
Example #11
def pytry(fn, arg, DEBUG=False):
    # Script to attempt a pywren job
    tries = 10
    results = []
    while tries > 0:
        try:
            pwex = pywren.default_executor()
            futures = pwex.map(fn, arg)
            dones, not_dones = pywren.wait(futures, pywren.ALL_COMPLETED)
            results = [f.result() for f in dones]
        except Exception as e:
            if DEBUG:
                pickle.dump({
                    "e": e,
                    "futures": futures
                }, open("debug.pickle", 'wb'))
                print('Pickle')
                return None
            print('oops')
            tries -= 1
        else:
            print('OK')
            return results
    print('NOT OK')
    return results
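
A usage sketch for the retry wrapper (the mapped function and input range are hypothetical):

results = pytry(lambda x: x + 1, range(100))
if results:
    print(sum(results))  # 5050 when all 100 tasks succeed
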
Example #12
def benchmark(loopcount, workers, matn, verbose=False):

    t1 = time.time()
    N = workers

    iters = np.arange(N)

    def f(x):
        return {'flops': compute_flops(loopcount, matn)}

    pwex = pywren.lambda_executor(shard_runtime=True)
    futures = pwex.map(f, iters)

    print("invocation done, dur=", time.time() - t1)
    print("callset id: ", futures[0].callset_id)

    local_jobs_done_timeline = []
    result_count = 0
    while result_count < N:
        fs_dones, fs_notdones = pywren.wait(futures, pywren.wren.ALWAYS)
        result_count = len(fs_dones)

        local_jobs_done_timeline.append((time.time(), result_count))
        est_flop = 2 * result_count * loopcount * matn**3

        est_gflops = est_flop / 1e9 / (time.time() - t1)
        if verbose:
            print("jobs done: {:5d}    runtime: {:5.1f}s   {:8.1f} GFLOPS ".
                  format(result_count,
                         time.time() - t1, est_gflops))

        if result_count == N:
            break

        time.sleep(1)
    if verbose:
        print("getting results")
    results = [f.result(throw_except=False) for f in futures]
    if verbose:
        print("getting status")
    run_statuses = [f.run_status for f in futures]
    invoke_statuses = [f.invoke_status for f in futures]

    all_done = time.time()
    total_time = all_done - t1
    print("total time", total_time)
    est_flop = result_count * 2 * loopcount * matn**3

    print(est_flop / 1e9 / total_time, "GFLOPS")
    res = {
        'total_time': total_time,
        'est_flop': est_flop,
        'run_statuses': run_statuses,
        'invoke_statuses': invoke_statuses,
        'callset_id': futures[0].callset_id,
        'local_jobs_done_timeline': local_jobs_done_timeline,
        'results': results
    }
    return res
Example #13
def benchmark(outfile, workers, experiments, eta, sleep):

    experiment_data = []
    for exp_i in range(experiments):
        print "running experiment {} ---------------------------".format(exp_i)
        N = workers
        t1 = time.time()
        iters = np.arange(N)

        def fingerprint(x):
            timing_responses = {}
            for server in exampleutils.NTP_SERVERS:
                ts_os = exampleutils.get_time_offset(server, 4)
                timing_responses[server] = ts_os

            hwaddr = exampleutils.get_hwaddr()
            uptime = exampleutils.get_uptime()[0]
            time.sleep(sleep)
            return {
                'hw_addr': hwaddr,
                'ntp_offsets': timing_responses,
                'uptime': uptime
            }

        pwex = pywren.default_executor()
        futures = pwex.map(fingerprint, iters)

        print "invocation done, dur=", time.time() - t1
        print "callset id: ", futures[0].callset_id
        fs_dones, fs_notdones = pywren.wait(futures)
        # get the job state of all of them
        print(len(fs_dones), len(fs_notdones))
        for f in futures:
            if f._state == pywren.wren.JobState.success or f._state == pywren.wren.JobState.error:
                pass
            else:
                print(f._state)

        print "getting results"
        results = [f.result() for f in futures]
        print "getting status"
        run_statuses = [f._run_status for f in futures]
        t2 = time.time()
        total_time = t2 - t1
        sleep_for = eta**exp_i
        exp = {
            'total_time': total_time,
            'exp_i': exp_i,
            'sleep_for': sleep_for,
            'run_statuses': run_statuses,
            'callset_id': futures[0].callset_id,
            'results': results
        }
        experiment_data.append(exp)
        print "seeping for", sleep_for / 60.0, "min"
        time.sleep(sleep_for)
    pickle.dump(experiment_data, open(outfile, 'wb'), -1)
Example #14
    def test_numba(self):
        """
        Simple numba test, compares two loops, makes sure
        one runs much faster than the other

        """

        N = 10000000
        results = self.wrenexec.map(time_foo, [N])
        pywren.wait(results)
        regular_time = results[0].result()
        print('regular time', regular_time)

        results = self.wrenexec.map(time_bar, [N])
        pywren.wait(results)
        numba_time = results[0].result()
        print('numba time', numba_time)

        speed_gain = regular_time / numba_time

        self.assertTrue(speed_gain > 8.0)
Example #15
    def start(self, parallel=False):
        put(self.control_plane.client, self.hash, PS.RUNNING.value)
        print("len starters", len(self.program.starters))
        chunked_starters = chunk(self.program.starters, 100)

        def start_chunk(c):
            sqs = boto3.resource('sqs')
            queue = sqs.Queue(self.queue_urls[0])
            for x in c:
                self.set_node_status(*x, NS.READY)
                queue.send_message(MessageBody=json.dumps(
                    [x[0], {str(key): val
                            for key, val in x[1].items()}]))

        if (parallel):
            pwex = pywren.default_executor()
            futures = pwex.map(start_chunk, chunked_starters)
            pywren.wait(futures)
        else:
            for c in chunked_starters:
                start_chunk(c)
            return 0
Example #16
File: array.py  Project: lasersonlab/zappy
    def map(self, func, iterables):
        import pywren

        futures = self.pywren_executor.map(
            func, iterables, exclude_modules=self.exclude_modules)
        pywren.wait(futures, return_when=pywren.ALL_COMPLETED)
        results = [f.result() for f in futures]
        if self.record_job_history:
            run_statuses = [f.run_status for f in futures]
            invoke_statuses = [f.invoke_status for f in futures]
            outdict = {
                "futures": futures,
                "run_statuses": run_statuses,
                "invoke_statuses": invoke_statuses,
            }
            logs_dir = os.path.expanduser("~/.zappy/logs")
            os.makedirs(logs_dir, exist_ok=True)
            timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S.%f")
            filename = os.path.join(logs_dir,
                                    "pywren-{}.pickle".format(timestamp))
            with open(filename, "wb") as file:
                pickle.dump(outdict, file)
        return results
Example #17
    def test_all_complete(self):
        def wait_x_sec_and_plus_one(x):
            time.sleep(x)
            return x + 1

        N = 10
        x = np.arange(N)

        futures = pywren.default_executor().map(wait_x_sec_and_plus_one, x)

        fs_dones, fs_notdones = pywren.wait(futures,
                                        return_when=pywren.wren.ALL_COMPLETED)
        res = np.array([f.result() for f in fs_dones])
        np.testing.assert_array_equal(res, x+1)
Example #18
    def test_multiple_callset_id_diff_executors(self):
        def wait_x_sec_and_plus_one(x):
            time.sleep(x)
            return x + 1

        N = 10
        x = np.arange(N)

        futures1 = pywren.default_executor().map(wait_x_sec_and_plus_one, x)
        futures2 = pywren.default_executor().map(wait_x_sec_and_plus_one, x)

        fs_dones, fs_notdones = pywren.wait(futures1 + futures2,
                return_when=pywren.wren.ALL_COMPLETED)
        res = np.array([f.result() for f in fs_dones])
        np.testing.assert_array_equal(res, np.concatenate((x,x))+1)
Example #19
def wait_for_futures(futures, print_frequency=100, raise_exception=True):
    results = []
    retrieved = {}
    call_id_to_failed_future = {}
    done, not_dones = pywren.wait(futures, ALWAYS)
    while len(not_dones) > 0:
        done, not_dones = pywren.wait(futures, ALWAYS)
        for finished_future in done:
            #print('finished future')
            if finished_future not in retrieved:
                try:
                    if "stdout" in finished_future.run_status:
                        if len(finished_future.run_status["stdout"]) > 0:
                            print(finished_future.run_status["stdout"])
                    #print('Adding finished future to result')
                    results.append(finished_future.result())
                    retrieved[finished_future] = True
                    if len(retrieved) % print_frequency == 0:
                        timing_results = [
                            timing_info for _, timing_info in results
                        ]
                        summarize_timing_infos(timing_results)
                except:
                    if finished_future._traceback is not None:
                        print("Future exception traceback was ",
                              finished_future._traceback)
                    print('future call_id {} failed'.format(
                        finished_future.call_id))
                    retrieved[finished_future] = True
                    call_id_to_failed_future[int(
                        finished_future.call_id)] = finished_future
                    if raise_exception:
                        reraise(finished_future._traceback[0],
                                finished_future._traceback[1],
                                finished_future._traceback[2])
    return results, call_id_to_failed_future
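
A hedged usage sketch, assuming each mapped task returns a (value, timing_info) pair as the summarize_timing_infos call above expects (pwex, my_task, and inputs are placeholders):

futures = pwex.map(my_task, inputs)
results, failed = wait_for_futures(futures, print_frequency=50,
                                   raise_exception=False)
print(len(results), "succeeded,", len(failed), "failed")
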
Example #20
    def test_map(self):

        def plus_one(x):
            return x + 1
        N = 10

        x = np.arange(N)
        futures = self.wrenexec.map(plus_one, x)

        result_count = 0
        while result_count < N:
            
            fs_dones, fs_notdones = pywren.wait(futures)
            result_count = len(fs_dones)

        res = np.array([f.result() for f in futures])
        np.testing.assert_array_equal(res, x + 1)
Example #21
def coded_mat_vec_mul(A_coded_2D, x, num_parity_blocks, coding_length):
    def coded_mat_vec_mul(id):
        shard_size = A_coded_2D.shard_sizes[1]
        reduce_idxs = A_coded_2D._block_idxs(axis=1)
        x_loc = x.get_block(0, 0)
        Ax_block = None
        for r in reduce_idxs:
            block1 = A_coded_2D.get_block(id, r)
            sidx = r * shard_size
            eidx = (r + 1) * shard_size
            x_block = x_loc[sidx:eidx]
            if (Ax_block is None):
                Ax_block = block1.dot(x_block)
            else:
                Ax_block = Ax_block + block1.dot(x_block)
        return Ax_block

    shard_size = A_coded_2D.shard_sizes[0]
    n_coded_procs = len(A_coded_2D._block_idxs(0))
    len_A_coded = n_coded_procs - coding_length - 1

    pwex = pywren.lambda_executor()
    futures = pwex.map(coded_mat_vec_mul, range(n_coded_procs))
    Ax = np.zeros((A_coded_2D.shape[0], 1))
    bitmask = np.ones((coding_length + 1, num_parity_blocks + 1))
    not_done = list(range(n_coded_procs))
    while decode2D.cant_be_decoded_with_bitmask(deepcopy(bitmask)):
        fs_dones, fs_not_dones = pywren.wait(futures, 2)
        for (id, f) in enumerate(futures):
            if f in fs_dones and id in not_done:
                # print("Worker done", id)
                try:
                    Ax[id * shard_size:(id + 1) * shard_size] = f.result()
                    i, j = decode2D.ind1Dto2D(id, len_A_coded,
                                              num_parity_blocks)
                    bitmask[i, j] = 0
                    not_done.remove(id)
                except Exception as e:
                    print(e)
                    pass
    # print("1: Decoding not dones", not_done)
    Ax = decode2D.decode_vector_with_bitmask(Ax, bitmask, num_parity_blocks,
                                             len_A_coded, shard_size,
                                             coding_length)
    return Ax
Example #22
    def test_any_complete(self):
        def wait_x_sec_and_plus_one(x):
            time.sleep(x)
            return x + 1

        N = 10
        x = np.arange(N)

        futures = pywren.default_executor().map(wait_x_sec_and_plus_one, x)

        fs_notdones = futures
        while (len(fs_notdones) > 0):
            fs_dones, fs_notdones = pywren.wait(fs_notdones,
                                            return_when=pywren.wren.ANY_COMPLETED,
                                            WAIT_DUR_SEC=1)
            self.assertTrue(len(fs_dones) > 0)
        res = np.array([f.result() for f in futures])
        np.testing.assert_array_equal(res, x+1)
Example #23
def progwait(fs, desc="", notebook=False):
    """
    A tqdm-based progress bar that looks nice and gives you an
    idea of how long until your futures are done
    """

    N = len(fs)
    result_count = 0
    fs_dones = []
    fs_notdones = fs

    if notebook:
        from tqdm import tqdm_notebook as tqdm_func
    else:
        from tqdm import tqdm as tqdm_func

    with tqdm_func(total=N, desc=desc) as pbar:
        while len(fs_dones) < N:
            new_fs_dones, new_fs_notdones = pywren.wait(
                fs_notdones, return_when=pywren.ANY_COMPLETED)
            fs_dones += new_fs_dones
            fs_notdones = new_fs_notdones
            pbar.update(len(new_fs_dones))
    return fs_dones, fs_notdones
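
A usage sketch (requires tqdm; the executor and doubling task are assumptions for illustration):

import pywren

pwex = pywren.default_executor()
futures = pwex.map(lambda x: x * 2, range(100))
fs_dones, fs_notdones = progwait(futures, desc="doubling")
print(sum(f.result() for f in fs_dones))  # 9900
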
Example #24
    def process_pending(self):
        active_map_ids = list(self.active_iters.keys())
        logger.info("ppc={} begin process pending len_active_map_ids={}".format(self._process_pending_count, len(active_map_ids)))

        for map_id in active_map_ids:
            logger.debug("processing map_id={} {}".format(map_id, len(self.active_iters[map_id])))
            # get a future set
            iter_futures = self.active_iters[map_id]
            # group these by callset ID
            f_by_callset_id = {}
            for f in iter_futures:
                pwf = f.current_future
                if pwf.callset_id not in f_by_callset_id:
                    f_by_callset_id[pwf.callset_id] = []
                f_by_callset_id[pwf.callset_id].append(pwf)

            ## call WAIT on everyone 
            logger.debug("map_id={} starting status check".format(map_id))
            for cs, flist in f_by_callset_id.items():
                logger.debug("map_id={} calling wait for callset_id={} len_futures={}".format(map_id,cs, len(flist)))

                # this will trigger an update on all of them
                fs_done, fs_notdone = pywren.wait(flist, return_when=pywren.ALWAYS, # ANY_COMPLETED, 
                                                  WAIT_DUR_SEC=1)
                logger.debug("map_id={} wait done for callset_id={} len_fs_done={}".format(map_id,cs, len(fs_done)))

            logger.debug("map_id={} status check done for all f in map_id".format(map_id))

            to_advance = []
            to_remove = []
            still_waiting = []
            for f in iter_futures:
                pwf = f.current_future
                if f.current_iter == f.max_iter:
                    to_remove.append(f)
                else:
                    if pwf.succeeded():
                        to_advance.append(f)
                    elif pwf.errored():
                        logger.debug("map_id={} map_pos={} errored on iter {}".format(map_id, 
                                                                                      f.original_map_pos, 
                                                                                      f.current_iter))
                        to_remove.append(f)
                    else:
                        still_waiting.append(f)
            logger.debug("map_id={} to_advance={}".format(map_id, get_map_pos(to_advance)))
            logger.debug("map_id={} to_remove={}".format(map_id, get_map_pos(to_remove)))
            logger.debug("map_id={} still_waiting={}".format(map_id, get_map_pos(still_waiting)))

            if len(to_advance) > 0:

                # construct next invocation
                wrapped_args = [(f.current_iter + 1,
                                 f.current_future.result(),
                                 f.arg) for f in to_advance ]

                wrapped_func = self.wrapped_funcs[map_id]
                logger.debug("map_id={} invoking new map with {} args".format(map_id, len(wrapped_args)))
                pywren_futures = self.wrenexec.map(wrapped_func,
                                                   wrapped_args,
                                                   exclude_modules=EXCLUDE_MODULES)
                logger.debug("map_id={} invoking new map done".format(map_id))
                for f, pwf in zip(to_advance, pywren_futures):
                    if f.save_iters:
                        f.iter_hist.append(f.current_future)
                    f.current_future = pwf
                    f.current_iter += 1


            # remove these from current map id
            to_remove_map_pos = [f.original_map_pos for f in to_remove]
            self.active_iters[map_id] = [f for f in self.active_iters[map_id] if f.original_map_pos \
                                         not in to_remove_map_pos]
            if len(self.active_iters[map_id]) == 0:
                logger.debug("map_id={} deleted".format(map_id))
                del self.active_iters[map_id]
                del self.wrapped_funcs[map_id]
        logger.info("ppc={} end process pending".format(self._process_pending_count))
        self._process_pending_count += 1
Example #25
def code_2D(A, num_parity_blocks, thres=1):
    assert (len(A._block_idxs(0)) % num_parity_blocks == 0)
    shard_size = A.shard_sizes[0]
    coded_shape = (A.shape[0] + num_parity_blocks * A.shard_sizes[0],
                   A.shape[1])
    coding_length = int(np.ceil(len(A._block_idxs(0)) / num_parity_blocks))
    coding_fn2D = make_coding_function2D(A, coding_length)

    coded_2D_shape = (
        A.shape[0] +
        (coding_length + 1 + num_parity_blocks) * A.shard_sizes[0], A.shape[1])
    A_coded_2D = matrix.BigMatrix(A.key + "CODED2D_{0}_{1}_{2}".format(
        A.shape[0], shard_size, num_parity_blocks),
                                  shape=coded_2D_shape,
                                  shard_sizes=A.shard_sizes,
                                  write_header=True,
                                  parent_fn=coding_fn2D)

    # if list(set(A_coded_2D.block_idxs_not_exist) - set(A.block_idxs_exist)) == []:
    #     return A_coded_2D

    last_block = max(A._block_idxs(0))
    columns = A_coded_2D._block_idxs(1)
    rows = A_coded_2D._block_idxs(0)
    to_read = []
    blocks_exist = A_coded_2D.block_idxs_exist
    for row in rows:
        if (row <= last_block): continue
        for column in columns:
            if (row, column) in blocks_exist:
                continue
            else:
                to_read.append((row, column))

    print("Number of parity blocks", len(to_read))

    num_parities_1D = coding_length * len(A._block_idxs(1))
    to_read_phase1 = to_read[0:num_parities_1D]
    to_read_phase2 = to_read[num_parities_1D:]

    def get_block_wrapper(x):
        A_coded_2D.get_block(*x)
        return 0

    #### For 2D ENCODING of A, uncomment
    pwex = pywren.lambda_executor()
    t_enc1 = time.time()
    futures2 = pwex.map(get_block_wrapper, to_read_phase1)
    result_count = 0
    fs_dones = []
    while (result_count < thres * len(to_read_phase1)):
        fs_dones, fs_notdones = pywren.wait(futures2, 2)
        result_count = len(fs_dones)
        print(result_count)
        time.sleep(3)
    for f in fs_dones:
        try:
            f.result()
        except Exception as e:
            print(e)
            pass
    t_enc1 = time.time() - t_enc1
    print("Encoding phase 1 time", t_enc1)

    t_enc2 = time.time()
    futures2 = pwex.map(get_block_wrapper, to_read_phase2)
    result_count = 0
    while (result_count < thres * len(to_read_phase2)):
        fs_dones, fs_notdones = pywren.wait(futures2, 2)
        result_count = len(fs_dones)
        print(result_count)
        time.sleep(3)
    for f in fs_dones:
        try:
            f.result()
        except Exception as e:
            print(e)
            pass
    t_enc2 = time.time() - t_enc2
    print("Encoding phase 2 time", t_enc2)
    print("Total ENCODING time", t_enc1 + t_enc2)

    # a = list(set(A_coded_2D.block_idxs_not_exist) - set(A.block_idxs_exist))
    # print("Still to encode", a)
    return A_coded_2D
Example #26
def write_data():
    def run_command(key):
        """
        keylist.append({'taskId': i,
                        'job_number': job_number,
                        'total_input': numTasks,
                        'write_element_size': write_element_size,
                        'process_time': process_time,
                        'total_time': total_time,
                        'redis': redisnode})
        """
        pywren.wrenlogging.default_config('INFO')
        begin_of_function = time.time()
        logger = logging.getLogger(__name__)
        logger.info("taskId = " + str(key['taskId']))
        taskId = key['taskId']
        jobid_int = int(key['job_number'])
        write_element_size = int(key['write_element_size'])
        process_time = int(key['process_time'])
        total_time = int(key['total_time'])

        rs = []
        #for hostname in key['redis'].split(";"):
        #    r1 = StrictRedis(host=hostname, port=6379, db=0).pipeline()
        #    rs.append(r1)
        #r1 = StrictRedis(host="172.31.12.131", port=6379, db=0).pipeline()
        #rs.append(r1)
        #nrs = len(rs)
        nrs = 1

        [read_time, work_time, write_time] = [0] * 3
        start_time = time.time()

        # a total of 10 threads
        number_of_clients = 1
        write_pool = ThreadPool(number_of_clients)

        time.sleep(process_time)

        logger.info("Process finish here: " + str(time.time()))

        def write_work_client(writer_key):
            start_time = time.time()
            client_id = int(writer_key['client_id'])
            taskID = writer_key['taskId']
            jobID = writer_key['jobid']
            datasize = writer_key['write_element_size']
            #datasize = 1310720
            total_time = writer_key['total_time']
            body = b'a' * datasize
            client_id = int(client_id)
            count = 0
            throughput_step = 1
            throughput_count = 1
            throughput_total = 0
            throughput_nops = 0
            ret = []
            while time.time() < start_time + total_time:
                count = count + 1
                keyname = str(jobID) + "-" + str(taskID) + "-" + str(count)
                m = hashlib.md5()
                m.update(keyname.encode('utf-8'))
                ridx = int(m.hexdigest()[:8], 16) % nrs
                randomized_keyname = str(jobID) + "-" + str(
                    taskID) + '-' + m.hexdigest()[:8] + '-' + str(count)
                #logger.info("(" + str(taskId) + ")" + "The name of the key to write is: " + randomized_keyname)
                start = time.time()
                logger.info("[REDIS] [" + str(jobID) + "] " +
                            str(time.time()) + " " + str(taskID) + " " +
                            str(len(body)) + " write " + "S")
                #rs[ridx].set(randomized_keyname, body)
                end = time.time()
                logger.info("[REDIS] [" + str(jobID) + "] " +
                            str(time.time()) + " " + str(taskID) + " " +
                            str(len(body)) + " write " + "E ")
                #for r in rs:
                #    r.execute()
                throughput_total += end - start
                throughput_nops += 1
                if end - start_time >= throughput_count:
                    throughput = throughput_nops / throughput_total
                    ret.append((end, throughput))
                    throughput_nops = 0
                    throughput_count += throughput_step
                    throughput_total = 0

            logger.info("Write finish here: " + str(time.time()))
            return ret

        writer_keylist = []
        number_of_clients = int(number_of_clients)
        for i in range(number_of_clients):
            writer_keylist.append({
                'client_id': i,
                'taskId': taskId,
                'jobid': jobid_int,
                'write_element_size': write_element_size,
                'total_time': total_time
            })

        start_time = time.time()
        write_pool_handler_container = []
        write_pool_handler = write_pool.map_async(write_work_client,
                                                  writer_keylist)
        write_pool_handler_container.append(write_pool_handler)

        if len(write_pool_handler_container) > 0:
            write_pool_handler = write_pool_handler_container.pop()
            ret = write_pool_handler.get()
            twait_end = time.time()
            write_time = twait_end - start_time
        write_pool.close()
        write_pool.join()
        end_of_function = time.time()
        return begin_of_function, end_of_function, write_time, ret

    numTasks = int(sys.argv[1])
    job_number = int(sys.argv[2])
    write_element_size = int(sys.argv[3])
    process_time = int(sys.argv[4])  # microseconds
    total_time = int(sys.argv[5])
    redisnode = sys.argv[6]

    keylist = []

    for i in range(numTasks):
        keylist.append({
            'taskId': i,
            'job_number': job_number,
            'total_input': numTasks,
            'write_element_size': write_element_size,
            'process_time': process_time,
            'total_time': total_time,
            'redis': redisnode
        })

    wrenexec = pywren.default_executor()
    futures = wrenexec.map(run_command, keylist)
    pywren.wait(futures)
    results = [f.result() for f in futures]

    print("Write " + str(job_number))
    run_statuses = [f.run_status for f in futures]
    invoke_statuses = [f.invoke_status for f in futures]
    res = {
        'results': results,
        'run_statuses': run_statuses,
        'invoke_statuses': invoke_statuses
    }
    filename = "redis-write-" + ".pickle.breakdown"
    pickle.dump(res, open(filename, 'wb'))
    return res
Example #27
    MAT_N = 4096

    iters = np.arange(N)

    def f(x):
        return compute_flops(LOOPCOUNT, MAT_N)

    pwex = pywren.default_executor()
    futures = pwex.map(f, iters)

    print "invocation done, dur=", time.time() - t1
    print futures[0].callset_id

    result_count = 0
    while result_count < N:
        fs_dones, fs_notdones = pywren.wait(futures)
        result_count = len(fs_dones)

        est_flop = 2 * result_count * LOOPCOUNT * MAT_N**3

        est_gflops = est_flop / 1e9 / (time.time() - t1)
        print "jobs done: {:5d}    runtime: {:5.1f}s   {:8.1f} GFLOPS ".format(
            result_count,
            time.time() - t1, est_gflops)

        if result_count == N:
            break

        time.sleep(1)

    all_done = time.time()
Example #28
                "/tmp/gensort", "-b" + str(begin),
                str(number_of_records), "/dev/stdout"
            ])
            keyname = "input/part-" + str(key)
            m = hashlib.md5()
            m.update(keyname.encode('utf-8'))
            logging.info("Here is the key! " + str(key))
            randomized_keyname = "input/" + m.hexdigest()[:8] + "-part-" + str(
                key)
            put_start = time.time()
            client.put_object(Body=data,
                              Bucket="yupeng-pywren-0",
                              Key=randomized_keyname)
            put_end = time.time()
            logger.info(
                str(key) + " th object uploaded using " +
                str(put_end - put_start) + " seconds.")
            gc.collect()
            key = key + 1

    wrenexec = pywren.default_executor()
    num_of_files = int(sys.argv[1])
    print("Generating " + str(62.5 * int(num_of_files)) + " Mb input dataset.")
    passed_tasks = range(0, num_of_files, 5)

    fut = wrenexec.map(run_command, passed_tasks)

    pywren.wait(fut)
    res = [f.result() for f in fut]
    print(res)
Example #29
def gemm(pwex,
         X,
         Y,
         out_bucket=None,
         tasks_per_job=1,
         local=False,
         dtype=np.float64,
         overwrite=True,
         gemm_impl=0,
         gemm_chunk_size=16):
    '''
        Compute XY return
        @param pwex - Execution context
        @param X - rhs matrix
        @param Y - lhs matrix
        @param tasks_per_job - number of tasks per job
        @param out_bucket - bucket job writes to
        @param num_jobs - how many lambdas to run
        @param local - run locally? #TODO remove once local pywren executor is provided
    '''
    # 0 -> 1 or 1 -> 0

    reduce_idxs = Y._block_idxs(axis=0)
    if (out_bucket == None):
        out_bucket = X.bucket

    root_key = generate_key_name_binop(X, Y, "gemm")
    if (Y.shard_sizes[0] != X.shard_sizes[1]):
        raise Exception("X dim 1 shard size must match Y dim 0 shard size")
    XY = BigMatrix(root_key,
                   shape=(X.shape[0], Y.shape[1]),
                   bucket=out_bucket,
                   shard_sizes=[X.shard_sizes[0], Y.shard_sizes[1]],
                   dtype=dtype,
                   write_header=True)

    num_out_blocks = len(XY.blocks)
    if (tasks_per_job > num_out_blocks):
        tasks_per_job = 1
    num_jobs = int(num_out_blocks / float(tasks_per_job))

    print("Out Shape", XY.shape)
    print("Total number of output blocks", len(XY.block_idxs))
    print("Total number of output blocks that exist", len(XY.blocks_exist))

    if (overwrite):
        block_idxs_to_map = list(set(XY.block_idxs))
    else:
        block_idxs_to_map = list(set(XY.block_idxs_not_exist))

    print("Number of output blocks to generate ", len(block_idxs_to_map))
    chunked_blocks = list(
        chunk(list(chunk(block_idxs_to_map, tasks_per_job)), num_jobs))
    if (not isinstance(pwex.invoker, pywren.queues.SQSInvoker)
            and gemm_impl > 0):
        raise Exception(
            "GEMM IMPL > 0 only supported for standalone mode pywren")

    print(_gemms[gemm_impl])

    def pywren_run(x):
        return _gemms[gemm_impl](x,
                                 XY,
                                 X,
                                 Y,
                                 reduce_idxs=reduce_idxs,
                                 dtype=dtype,
                                 block_chunk_size=gemm_chunk_size)

    all_futures = []
    for i, c in enumerate(chunked_blocks):
        print("Submitting job for chunk {0} in axis 0".format(i))
        if (local):
            list(map(pywren_run, c))
        else:
            s = time.time()
            futures = pwex.map(pywren_run,
                               c,
                               exclude_modules=["site-packages"])
            e = time.time()
            print("Pwex Map Time {0}".format(e - s))
            all_futures.append((i, futures))

    if (local):
        return XY

    for i, futures, in all_futures:
        print("waiting")
        pywren.wait(futures)
        [f.result() for f in futures]

    return XY
Example #30
def recompute_mat_vec_mul(A_coded_2D, x, thres=0.95):
    def shard_mat_vec_mul(id):
        shard_size = A_coded_2D.shard_sizes[1]
        reduce_idxs = A_coded_2D._block_idxs(axis=1)
        x_loc = x.get_block(0, 0)
        Ax_block = None
        for r in reduce_idxs:
            block1 = A_coded_2D.get_block(id, r)
            sidx = r * shard_size
            eidx = (r + 1) * shard_size
            x_block = x_loc[sidx:eidx]
            if (Ax_block is None):
                Ax_block = block1.dot(x_block)
            else:
                Ax_block = Ax_block + block1.dot(x_block)
        return Ax_block, id

    shard_size = A_coded_2D.shard_sizes[0]
    n_coded_procs = len(A_coded_2D._block_idxs(0))

    pwex = pywren.lambda_executor()
    futures = pwex.map(shard_mat_vec_mul, range(n_coded_procs))
    Ax = np.zeros((A_coded_2D.shape[0], 1))
    not_done = list(range(n_coded_procs))
    fs_dones = []
    f_result_dones = []
    while len(fs_dones) < thres * n_coded_procs:
        fs_dones, fs_not_dones = pywren.wait(futures, 2)
        for f in list(set(fs_dones) - set(f_result_dones)):
            # print("Worker done", id)
            f_result_dones.append(f)
            try:
                result = f.result()
                id = result[1]
                Ax[id * shard_size:(id + 1) * shard_size] = result[0]
                not_done.remove(id)
            except Exception as e:
                #print(e)
                pass
        time.sleep(2)
    print("Recomputing not dones", not_done)
    futures2 = pwex.map(shard_mat_vec_mul, not_done)
    f_result_dones2 = []
    while not_done != []:
        fs_dones2, fs_not_dones2 = pywren.wait(futures2, 3)
        for f in list(set(fs_dones2) - set(f_result_dones2)):
            f_result_dones2.append(f)
            try:
                result = f.result()
                id = result[1]
                if id in not_done:
                    print("Recomputed", id)
                    Ax[id * shard_size:(id + 1) * shard_size] = result[0]
                    not_done.remove(id)
            except Exception as e:
                #print(e)
                pass
        time.sleep(2)
        fs_dones, fs_not_dones = pywren.wait(futures, 3)
        for f in list(set(fs_dones) - set(f_result_dones)):
            f_result_dones.append(f)
            try:
                result = f.result()
                id = result[1]
                if id in not_done:
                    print("Straggler computed", id)
                    Ax[id * shard_size:(id + 1) * shard_size] = result[0]
                    not_done.remove(id)
            except Exception as e:
                #print(e)
                pass
        time.sleep(2)
        if fs_not_dones2 == [] and fs_not_dones == []:
            print("NOT DONE", not_done)
            break
    print("Recomputing done")
    return Ax