Example #1

import lithops

# sample input data for the mappers (illustrative values)
iterdata = [1, 2, 3, 4]


def my_map_function(x):
    return x + 7


def my_reduce_function(results):
    total = 0
    for map_result in results:
        total = total + map_result
    return total


if __name__ == "__main__":
    """
    By default the reducer will be launched within a Cloud Function
    when the local Lithops have all the results from the mappers.
    """
    pw = lithops.ibm_cf_executor()
    pw.map_reduce(my_map_function, iterdata, my_reduce_function)
    print(pw.get_result())
    """
    Set 'reducer_wait_local=True' to wait for the results locally.
    """
    pw = lithops.ibm_cf_executor()
    pw.map_reduce(my_map_function,
                  iterdata,
                  my_reduce_function,
                  reducer_wait_local=True)
    print(pw.get_result())
Example #2
    # upload results to IBM COS
    res = ['confout.gro', 'ener.edr', 'md.log', 'state.cpt']
    for name in res:
        with open(os.path.join(temp_dir, name), "rb") as f:
            ibm_cos.put_object(Bucket=bucket,
                               Key=os.path.join('gmx-mem', name),
                               Body=f)

    with open('md.log', 'r') as file:
        data = file.read()

    return {'run_time': run_time, 'md_log': data}


if __name__ == '__main__':
    # Example of using benchMEM from https://www.mpibpc.mpg.de/grubmueller/bench

    param1 = 'param1 example'

    total_start = time.time()
    pw = lithops.ibm_cf_executor(runtime='cactusone/lithops-gromacs:1.0.2',
                                 runtime_memory=2048)
    pw.map(sh_cmd_executor, iterdata, extra_args=(param1, ))
    res = pw.get_result()
    pw.clean()

    print("GROMACS execution time {}".format(res[0]['run_time']))
    print("Total execution time {}".format(time.time() - total_start))
    print(res[0]['md_log'])
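

# A minimal sketch (hypothetical 'toy_executor', not the real sh_cmd_executor)
# of how the call above is wired: each value of 'iterdata' arrives as 'x',
# 'param1' is supplied through extra_args=(param1, ), and a parameter named
# 'ibm_cos' is injected by Lithops as the IBM COS client used by put_object
# in the truncated function body above.
def toy_executor(x, param1, ibm_cos):
    # the COS client is unused in this sketch; it is listed only to show
    # where the injected 'ibm_cos' argument fits in the signature
    return '{} processed with {}'.format(x, param1)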
Example #3
import lithops


def my_map_function(x):
    return x + 7


def scheduler(total):
    iterdata = range(total)
    pw = lithops.ibm_cf_executor()
    return pw.map(my_map_function, iterdata)


def my_function(x):
    iterdata = range(x)
    pw = lithops.ibm_cf_executor()
    return pw.map(my_map_function, iterdata)
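

# Hypothetical driver sketch: invoke 'my_function' with call_async so that the
# nested map() above is spawned from inside a function activation. Whether
# get_result() transparently resolves the futures returned by the inner
# executor depends on the Lithops version, so treat the final print as
# illustrative rather than guaranteed.
if __name__ == '__main__':
    pw = lithops.ibm_cf_executor()
    pw.call_async(my_function, 5)
    print(pw.get_result())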
Example #5
import lithops


def my_function_put(text, storage):
    # 'storage' is injected by Lithops; put_cobject stores data as a cloudobject
    co1 = storage.put_cobject('Cloudobject test 1: {}'.format(text))
    co2 = storage.put_cobject('Cloudobject test 2: {}'.format(text))
    return [co1, co2]


def my_function_get(co, storage):
    data = storage.get_cobject(co)
    return data


if __name__ == "__main__":
    """
    Managing cloudobjects with context manager.
    At the end of the with statement all
    cloudobjects are automatically deleted.
    """
    with lithops.ibm_cf_executor() as pw:
        pw.call_async(my_function_put, 'Hello World')
        cloudobjects = pw.get_result()
        pw.map(my_function_get, cloudobjects)
        print(pw.get_result())

    """
    Managing cloudobjects without context manager.
    pw.clean() must be called at the end to delete
    the cloudobjects created in the same executor as
    long as you used the default location.
    Alternatively, you can call pw.clean(cs=cloudobjects)
    to delete a specific list of cloudobjects.
    pw.clean(cs=cloudobjects) is mandatory if you created
    the cloudobjects in a custom location.
    """
Example #6
"""
Simple Lithops example using rabbitmq to wait map function invocations
RabbitMQ amqp_url must be in configuration to make it working.
"""
import lithops
import time

total = 10


def my_function(x):
    time.sleep(2)
    return x + 7


if __name__ == "__main__":
    pw = lithops.ibm_cf_executor(runtime_memory=256)
    pw.map(my_function, range(total))
    pw.wait()  # blocks current execution until all function activations finish
    pw.clean()

    # Activate RabbitMQ as a monitoring system
    pw = lithops.ibm_cf_executor(runtime_memory=256, rabbitmq_monitor=True)
    pw.map(my_function, range(total))
    pw.wait()  # blocks current execution until all function activations finish
    pw.clean()
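
    # A small additional sketch (assuming the same configuration as above,
    # including the RabbitMQ amqp_url): time both runs to compare the default
    # storage-based monitoring against RabbitMQ-based monitoring.
    for use_rabbitmq in (False, True):
        start = time.time()
        pw = lithops.ibm_cf_executor(runtime_memory=256,
                                     rabbitmq_monitor=use_rabbitmq)
        pw.map(my_function, range(total))
        pw.wait()
        pw.clean()
        print('rabbitmq_monitor={}: {:.2f} seconds'.format(use_rabbitmq,
                                                           time.time() - start))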