Example #1
def login(config, url=None, user=None, name=None, password=None):
    """Log in to your MicroFunctions platform"""
    default = MfnClient.load_json()
    default = MfnClient.load_env(default)

    # sanitize input
    if url and not url.startswith('http'):
        print("WARNING: please use a format 'http://<hostname>:<port>' or 'https://...' for the url")
        if url.endswith(':443'):
            url = 'https://'+url
        else:
            url = 'http://'+url

    # overwrite values if provided
    config.mfn_url = url or config.mfn_url or default.get('mfn_url', 'https://microfunctions.org')
    config.mfn_user = user or config.mfn_user or default.get('mfn_user')
    config.mfn_name = name or config.mfn_name or default.get('mfn_name')
    config.mfn_password = password or config.mfn_password or default.get('mfn_password')
    if config.mfn_user is None:
        config.mfn_user = click.prompt(f"Enter username for {config.mfn_url}")
    if config.mfn_password is None:
        config.mfn_password = getpass.getpass(f"Enter password for {config.mfn_user}: ")
    try:
        client = config.get_client()
        print(f"Successfully logged into {config.mfn_url} as user {config.mfn_user}!")
        return client
    except Exception as e:
        config.mfn_password = None
        print(str(e))
Example #2
    def __init__(self,
                 test_name=None,
                 timeout=None,
                 workflow_filename=None,
                 new_user=False,
                 delete_user=False):

        self._settings = self._get_settings()

        if new_user:
            random_str = str(random.randint(0, 10000)) + "_" + str(time.time())
            random_user = hashlib.sha256(random_str.encode()).hexdigest()
            random_user = "******" + random_user[0:31] + "@knix.io"
            print("User: "******"User: "******"mfn_user"])
            self._client = MfnClient()

        if workflow_filename is None:
            self._workflow_filename = self._settings[
                "workflow_description_file"]
        else:
            self._workflow_filename = workflow_filename

        ind = self._workflow_filename.rfind("/")
        if ind != -1:
            self._workflow_folder = self._workflow_filename[:ind + 1]
        else:
            self._workflow_folder = "./"
        print("Workflow folder: " + self._workflow_folder)

        self._workflow_description = self._get_json_file(
            self._workflow_filename)

        if "name" in self._workflow_description:
            self._workflow_name = self._workflow_description["name"]
        else:
            self._workflow_name = self._workflow_filename[
                0:self._workflow_filename.rfind(".")]

        if test_name is not None:
            self._test_name = test_name
        else:
            self._test_name = self._workflow_filename

        if timeout is not None:
            self._settings["timeout"] = timeout

        self._log_clear_timestamp = int(time.time() * 1000.0 * 1000.0)

        # will be the deployed workflow object in self._client
        self._workflow = None
        self._deployment_error = ""

        self._workflow_resources = []

        self.upload_workflow()
        self.deploy_workflow()
Example #3
def login(config, url=None, user=None, name=None, password=None):
    """Log in to your MicroFunctions platform"""
    default = MfnClient.load_json()
    default = MfnClient.load_env(default)

    # sanitize input
    if url and not url.startswith('http'):
        print(
            "WARNING: please use a format 'http://<hostname>:<port>' or 'https://...' for the url"
        )
        if url.endswith(':443'):
            url = 'https://' + url
        else:
            url = 'http://' + url

    # overwrite values if provided
    config.mfn_url = url or config.mfn_url or default.get(
        'mfn_url', 'https://knix.io/mfn')
    config.mfn_user = user or config.mfn_user or default.get('mfn_user')
    config.mfn_name = name or config.mfn_name or default.get('mfn_name')
    config.mfn_password = password or config.mfn_password or default.get(
        'mfn_password')
    if config.mfn_user is None:
        config.mfn_user = click.prompt(f"Enter username for {config.mfn_url}")
    if config.mfn_password is None:
        config.mfn_password = getpass.getpass(
            f"Enter password for {config.mfn_user}: ")
    config.proxies = default.get('proxies', None)
    if config.proxies is None:
        # collect proxy settings from the environment (os.environ, not sys.environ)
        # into the local dict before assigning it to the config
        proxies = dict()
        if 'http_proxy' in os.environ:
            proxies['http'] = os.environ['http_proxy']
        elif 'HTTP_PROXY' in os.environ:
            proxies['http'] = os.environ['HTTP_PROXY']
        if 'https_proxy' in os.environ:
            proxies['https'] = os.environ['https_proxy']
        elif 'HTTPS_PROXY' in os.environ:
            proxies['https'] = os.environ['HTTPS_PROXY']
        if len(proxies) > 0:
            config.proxies = proxies
    try:
        client = config.get_client()
        print(
            f"Successfully logged into {config.mfn_url} as user {config.mfn_user}!"
        )
        return client
    except Exception as e:
        config.mfn_password = None
        print(str(e))
Example #4
    def get_client(self):
        if not (self.mfn_url and self.mfn_user and self.mfn_password):
            print("Please login")
            sys.exit(2)
        else:
            return MfnClient(self.mfn_url, self.mfn_user, self.mfn_password,
                             self.mfn_name, self.proxies)
Example #5
    def setUp(self):
        self._settings = self._get_settings()
        self._client = MfnClient()
Example #6
class StorageActionsTest(unittest.TestCase):

    def setUp(self):
        self._settings = self._get_settings()
        self._client = MfnClient()

    def test_list_keys(self):
        key_list = self._client.list_keys()

        old_len = len(key_list)

        ts = str(time.time() * 1000.0)
        key = "my_random_key_" + ts

        self._client.put(key, ts)

        key_list2 = self._client.list_keys()

        new_len = len(key_list2)

        if (old_len+1) == new_len:
            self._report("test_list_keys", True)
        else:
            self._report("test_list_keys", False, old_len + 1, new_len)

    def test_get_put_delete(self):
        ts = str(time.time() * 1000.0)
        key = "my_random_key_" + ts
        val = self._client.get(key)

        # should be None
        if val is None:
            self._report("test_get_non-existing_key", True)
        else:
            self._report("test_get_non-existing_key", False, None, val)

        self._client.put(key, ts)
        val2 = self._client.get(key)

        # should be ts
        if val2 == ts:
            self._report("test_get_existing_key", True)
        else:
            self._report("test_get_existing_key", False, ts, val2)

        self._client.delete(key)
        val3 = self._client.get(key)

        # should be None
        if val3 is None:
            self._report("test_delete_key", True)
        else:
            self._report("test_delete_key", False, None, val3)

    def tearDown(self):
        self._client.disconnect()

    # internal functions

    def _get_json_file(self, filename):
        json_data = {}
        if os.path.isfile(filename):
            with open(filename) as json_file:
                json_data = json.load(json_file)
        return json_data

    def _get_settings(self):
        settings = {}
        # read default global settings files
        settings.update(self._get_json_file("../settings.json"))

        # read test specific settings
        settings.update(self._get_json_file("settings.json"))

        if len(settings) == 0:
            raise Exception("Empty settings")

        # Defaults
        settings.setdefault("timeout", 60)

        return settings

    def _report(self, test_name, success, expected=None, actual=None):
        if success:
            print(test_name + " test " + mfntestpassed)
        else:
            print(test_name + " test " + mfntestfailed + '(result: ' + json.dumps(actual) + ', expected: ' + json.dumps(expected) + ')')
Example #7
File: zip.py Project: ztz1989/knix
"""
  zip: a script that exemplifies the upload of a function's custom ZIP file
"""
import requests
import os
import base64
import sys
import datetime

from zipfile import ZipFile
from mfn_sdk import MfnClient

import logging
logging.basicConfig(level=logging.DEBUG)

# user and password below are masked/test placeholders from the source listing
c = MfnClient('https://knix.io', '*****@*****.**', 'test123', proxies={})
"""
This example uploads a given ZIP file to a function
"""

# Create a new function
g = c.add_function('custom')

# Create a zip file from the directory contents
zip_name = "custom_function.zip"
if os.path.exists(zip_name):
    os.remove(zip_name)
# open the archive once; re-opening it in 'w' mode per directory (as the
# original listing did) would truncate it on every os.walk iteration
with ZipFile(zip_name, 'w') as zf:
    for root, dirs, files in os.walk('.'):
        for fn in files:
            # archive paths relative to the current directory
            zf.write(os.path.join(root, fn))
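
# The listing ends before the ZIP is attached to the function; judging by the
# upload() method used on function objects in Example #10, the final step
# would presumably be:
g.upload(zip_name)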
Example #8
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
"""
  backup: a script that downloads all workflows, functions and data of a 
          MicroFunctions account and pickles the main info to a file
"""
import requests
import os
import base64
import pickle
import sys

from mfn_sdk import MfnClient

c = MfnClient()

data = dict()

with open("backup.p", "wb") as backup:
    fns = c.functions
    for f in fns:
        f.source  # touch the property so the client lazily fetches code, zip and metadata
        fobj = {
            '_runtime': f._runtime,
            '_name': f._name,
            '_modified': f._modified,
            'requirements': f.requirements,
            'source': f.source
        }
        pickle.dump(fobj, backup)
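    # The listing stops after the function loop. A sketch of the rest of the
    # backup, assuming the `workflows`, `keys` and `get` accessors used in the
    # other examples here (reading `w.json` back is an assumption):
    for w in c.workflows:
        pickle.dump({'name': w.name, 'json': w.json}, backup)
    for k in list(c.keys()):
        pickle.dump({'key': k, 'value': c.get(k)}, backup)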
Example #9
class StorageActionsTest(unittest.TestCase):
    def setUp(self):
        self._settings = self._get_settings()
        self._client = MfnClient()

    # kv operations
    #@unittest.skip("")
    def test_get_put_delete(self):
        key_list = self._client.list_keys()
        old_len = len(key_list)

        ts = str(time.time() * 1000.0)
        key = "my_random_key_" + ts
        val = self._client.get(key)

        # should be None
        if val is None:
            self._report("test_get_non-existing_key", True)
        else:
            self._report("test_get_non-existing_key", False, None, val)

        self._client.put(key, ts)
        val2 = self._client.get(key)

        key_list2 = self._client.list_keys()
        new_len = len(key_list2)

        if (old_len + 1) == new_len:
            self._report("test_list_keys", True)
        else:
            self._report("test_list_keys", False, old_len + 1, new_len)

        # should be ts
        if val2 == ts:
            self._report("test_get_existing_key", True)
        else:
            self._report("test_get_existing_key", False, ts, val2)

        self._client.delete(key)
        val3 = self._client.get(key)

        # should be None
        if val3 is None:
            self._report("test_delete_key", True)
        else:
            self._report("test_delete_key", False, None, val3)

    # map operations
    def test_map_operations(self):
        map_list = self._client.list_maps()
        old_len = len(map_list)

        ts = str(time.time() * 1000.0)
        mapname = "my_random_mapname_" + ts

        rval = ts + "_" + str(random.randint(0, 1000000))
        rkey = "my_random_key_" + str(random.randint(0, 1000000))
        rkey2 = "my_random_key_" + str(random.randint(0, 1000000))
        rkey3 = "my_random_key_" + str(random.randint(0, 1000000))

        self._client.create_map(mapname)
        # the creation of a map doesn't actually take place until a key-value pair is added
        self._client.put_map_entry(mapname, rkey, rval)

        time.sleep(3)
        map_list2 = self._client.list_maps()
        new_len = len(map_list2)

        if (old_len + 1) == new_len:
            self._report("test_create_map", True)
            self._report("test_list_maps", True)
        else:
            self._report("test_create_map", False, old_len + 1, new_len)
            self._report("test_list_maps", False, old_len + 1, new_len)

        val = self._client.get_map_entry(mapname, rkey)
        val_none = self._client.get_map_entry(mapname, rkey2)

        if val == rval and val_none is None:
            self._report("test_get_map_entry", True)
            self._report("test_put_map_entry", True)
        else:
            self._report("test_get_map_entry", False, val, rval)
            self._report("test_put_map_entry", False, val, rval)

        self._client.put_map_entry(mapname, rkey2, rval)
        self._client.put_map_entry(mapname, rkey3, rval)

        mapentries = self._client.retrieve_map(mapname)

        if all(k in mapentries.keys() for k in (rkey, rkey2, rkey3)) and \
                all(v == rval for v in mapentries.values()):
            self._report("test_retrieve_map", True)
        else:
            self._report("test_retrieve_map", False, mapentries, {
                rkey: rval,
                rkey2: rval,
                rkey3: rval
            })

        mapkeys = self._client.get_map_keys(mapname)

        if all(k in mapkeys for k in mapentries.keys()):
            self._report("test_get_map_keys", True)
        else:
            self._report("test_get_map_keys", False, mapkeys,
                         mapentries.keys())

        contains = self._client.contains_map_key(mapname, rkey)
        contains2 = self._client.contains_map_key(mapname, rkey2)

        self._client.delete_map_entry(mapname, rkey2)

        contains3 = self._client.contains_map_key(mapname, rkey2)

        if contains and contains2 and not contains3:
            self._report("test_contains_map_key", True)
            self._report("test_delete_map_key", True)
        else:
            self._report("test_contains_map_key", False, True, True)
            self._report("test_delete_map_key", False, contains3, False)

        self._client.clear_map(mapname)

        mapkeys2 = self._client.get_map_keys(mapname)

        if not mapkeys2:
            self._report("test_clear_map", True)
        else:
            self._report("test_clear_map", False, mapkeys2, [])

        self._client.delete_map(mapname)
        time.sleep(3)

        map_list3 = self._client.list_maps()
        new_len2 = len(map_list3)

        if old_len == new_len2 and new_len == new_len2 + 1:
            self._report("test_delete_map", True)
        else:
            self._report("test_delete_map", False, new_len2, old_len)

    # set operations
    #@unittest.skip("")
    def test_set_operations(self):
        set_list = self._client.list_sets()
        old_len = len(set_list)

        ts = str(time.time() * 1000.0)
        setname = "my_random_setname_" + ts

        ts2 = str(time.time() * 1000.0)
        ritem = "my_random_item_" + ts2

        self._client.create_set(setname)
        # the creation of a set doesn't actually take place unless an item is added
        self._client.add_set_entry(setname, ritem)

        time.sleep(3)
        set_list2 = self._client.list_sets()
        new_len = len(set_list2)

        if (old_len + 1) == new_len:
            self._report("test_create_set", True)
            self._report("test_list_sets", True)
        else:
            self._report("test_create_set", False, old_len + 1, new_len)
            self._report("test_list_sets", False, old_len + 1, new_len)

        contains = self._client.contains_set_item(setname, ritem)

        if contains:
            self._report("test_add_set_entry", True)
        else:
            self._report("test_add_set_entry", False, None, True)

        content = self._client.retrieve_set(setname)

        if isinstance(content, set) and ritem in content:
            self._report("test_retrieve_set", True)
        else:
            self._report("test_retrieve_set", False, ritem in content, True)

        self._client.remove_set_entry(setname, ritem)

        content2 = self._client.retrieve_set(setname)
        contains2 = self._client.contains_set_item(setname, ritem)

        if not contains2 and ritem not in content2:
            self._report("test_remove_set_entry", True)
            self._report("test_retrieve_set", True)
        else:
            self._report("test_remove_set_entry", False, contains2, False)
            self._report("test_retrieve_set", False, ritem in content2, False)

        self._client.add_set_entry(setname, "randomitem1")
        self._client.add_set_entry(setname, "randomitem2")
        self._client.add_set_entry(setname, "randomitem3")
        self._client.add_set_entry(setname, "randomitem4")
        self._client.add_set_entry(setname, "randomitem5")

        content3 = self._client.retrieve_set(setname)

        self._client.clear_set(setname)

        content4 = self._client.retrieve_set(setname)

        if len(content3) == 5 and len(content4) == 0:
            self._report("test_clear_set", True)
        else:
            self._report("test_clear_set", False, len(content4), 0)

        self._client.delete_set(setname)
        time.sleep(3)

        set_list3 = self._client.list_sets()
        new_len2 = len(set_list3)

        if old_len == new_len2 and new_len == new_len2 + 1:
            self._report("test_delete_set", True)
        else:
            self._report("test_delete_set", False, new_len2, old_len)

    # counter operations
    #@unittest.skip("")
    def test_create_get_increment_decrement_delete_counter(self):
        counter_list = self._client.list_counters()
        old_len = len(counter_list)
        ts = str(time.time() * 1000.0)
        countername = "my_random_countername_" + ts

        rval = random.randint(0, 100)

        self._client.create_counter(countername, rval)

        counter_list2 = self._client.list_counters()
        new_len = len(counter_list2)

        if (old_len + 1) == new_len:
            self._report("test_list_counters", True)
        else:
            self._report("test_list_counters", False, old_len + 1, new_len)

        if countername not in counter_list and countername in counter_list2:
            self._report("test_create_counter", True)
        else:
            self._report("test_create_counter", False, None, countername)

        val = self._client.get_counter(countername)

        if val == rval:
            self._report("test_get_counter", True)
        else:
            self._report("test_get_counter", False, rval, val)

        r2 = random.randint(0, 100)
        self._client.increment_counter(countername, r2)

        val2 = self._client.get_counter(countername)

        if val2 == val + r2:
            self._report("test_increment_counter", True)
        else:
            self._report("test_increment_counter", False, val + r2, val2)

        r3 = random.randint(0, 100)
        self._client.decrement_counter(countername, r3)

        val3 = self._client.get_counter(countername)

        if val3 == val2 - r3:
            self._report("test_decrement_counter", True)
        else:
            self._report("test_decrement_counter", False, val2 - r3, val3)

        self._client.delete_counter(countername)

        # sleep a little to let the change take effect
        time.sleep(3)

        counter_list3 = self._client.list_counters()

        if countername not in counter_list3:
            self._report("test_delete_counter", True)
        else:
            self._report("test_delete_counter", False, None, countername)

    def tearDown(self):
        self._client.disconnect()

    # internal functions

    def _get_json_file(self, filename):
        json_data = {}
        if os.path.isfile(filename):
            with open(filename) as json_file:
                json_data = json.load(json_file)
        return json_data

    def _get_settings(self):
        settings = {}
        # read default global settings files
        settings.update(self._get_json_file("../settings.json"))

        # read test specific settings
        settings.update(self._get_json_file("settings.json"))

        if len(settings) == 0:
            raise Exception("Empty settings")

        # Defaults
        settings.setdefault("timeout", 60)

        return settings

    def _report(self, test_name, success, expected=None, actual=None):
        if success:
            print(test_name + " test " + mfntestpassed)
        else:
            print(test_name + " test " + mfntestfailed + '(result: ' +
                  json.dumps(actual) + ', expected: ' + json.dumps(expected) +
                  ')')
Example #10
class MFNTest():
    def __init__(self,
                 test_name=None,
                 timeout=None,
                 workflow_filename=None,
                 new_user=False,
                 delete_user=False):

        self._settings = self._get_settings()

        if new_user:
            random_str = str(random.randint(0, 10000)) + "_" + str(time.time())
            random_user = hashlib.sha256(random_str.encode()).hexdigest()
            random_user = "******" + random_user[0:31] + "@knix.io"
            print("User: "******"User: "******"mfn_user"])
            self._client = MfnClient()

        if workflow_filename is None:
            self._workflow_filename = self._settings[
                "workflow_description_file"]
        else:
            self._workflow_filename = workflow_filename

        ind = self._workflow_filename.rfind("/")
        if ind != -1:
            self._workflow_folder = self._workflow_filename[:ind + 1]
        else:
            self._workflow_folder = "./"
        #print("Workflow folder: " + self._workflow_folder)

        self._workflow_description = self._get_json_file(
            self._workflow_filename)

        if "name" in self._workflow_description:
            self._workflow_name = self._workflow_description["name"]
        else:
            self._workflow_name = self._workflow_filename[
                0:self._workflow_filename.rfind(".")]

        if test_name is not None:
            self._test_name = test_name
        else:
            self._test_name = self._workflow_filename

        if timeout is not None:
            self._settings["timeout"] = timeout

        self._log_clear_timestamp = int(time.time() * 1000.0 * 1000.0)

        # will be the deployed workflow object in self._client
        self._workflow = None
        self._deployment_error = ""

        self._workflow_resources = []

        self.upload_workflow()
        self.deploy_workflow()

    def _get_json_file(self, filename):
        json_data = {}
        if os.path.isfile(filename):
            with open(filename) as json_file:
                json_data = json.load(json_file)
        return json_data

    def _get_settings(self):
        settings = {}
        # read default global settings files
        settings.update(self._get_json_file("../settings.json"))

        # read test specific settings
        settings.update(self._get_json_file("settings.json"))

        if len(settings) == 0:
            raise Exception("Empty settings")

        # Defaults
        settings.setdefault("timeout", 60)

        return settings

    def _get_resource_info(self, resource_ref):
        #dir_list = next(os.walk('.'))[1]
        dir_list = next(os.walk(self._workflow_folder))[1]
        is_zip = False
        is_jar = False
        runtime = ""
        found = False
        if "zips" in dir_list:
            resource_filename = self._workflow_folder + "zips/" + resource_ref + ".zip"
            if os.path.isfile(resource_filename):
                found = True
                runtime = "Python 3.6"
                is_zip = True

        if not found:
            if "python" in dir_list:
                resource_filename = self._workflow_folder + "python/" + resource_ref + ".py"
                if os.path.isfile(resource_filename):
                    found = True
                    runtime = "Python 3.6"
            else:
                resource_filename = self._workflow_folder + resource_ref + ".py"
                if os.path.isfile(resource_filename):
                    found = True
                    runtime = "Python 3.6"

        if not found and "jars" in dir_list:
            resource_filename = self._workflow_folder + "jars/" + resource_ref + ".jar"
            if os.path.isfile(resource_filename):
                found = True
                runtime = "Java"
                is_jar = True

        if not found:
            if "java" in dir_list:
                resource_filename = self._workflow_folder + "java/" + resource_ref + ".java"
                if os.path.isfile(resource_filename):
                    found = True
                    runtime = "Java"
            else:
                resource_filename = self._workflow_folder + resource_ref + ".java"
                if os.path.isfile(resource_filename):
                    found = True
                    runtime = "Java"

        retval = {}
        retval["resource_filename"] = resource_filename
        retval["resource_runtime"] = runtime
        retval["is_zip"] = is_zip
        retval["is_jar"] = is_jar
        return retval

    def _get_resource_info_map(self,
                               workflow_description=None,
                               resource_info_map=None):
        if workflow_description is None:
            workflow_description = self._workflow_description
        if resource_info_map is None:
            resource_info_map = {}

        if "functions" in self._workflow_description:
            workflow_functions = workflow_description["functions"]
            for wf_function in workflow_functions:
                if "name" in wf_function:
                    resource_name = wf_function["name"]
                    resource_ref = resource_name
                    if "resource" in wf_function:
                        resource_ref = wf_function["resource"]

                    if resource_ref not in resource_info_map.keys():
                        resource_info = self._get_resource_info(resource_ref)
                        resource_info[
                            "resource_req_filename"] = "requirements/" + resource_ref + "_requirements.txt"
                        resource_info[
                            "resource_env_filename"] = "environment_variables/" + resource_ref + "_environment_variables.txt"
                        resource_info_map[resource_ref] = resource_info

        elif "States" in workflow_description:
            states = workflow_description["States"]
            for sname in states:
                state = states[sname]
                if "Resource" in state:
                    resource_name = state["Resource"]

                    if resource_name not in resource_info_map.keys():
                        resource_info = self._get_resource_info(resource_name)
                        resource_info[
                            "resource_req_filename"] = "requirements/" + resource_name + "_requirements.txt"
                        resource_info[
                            "resource_env_filename"] = "environment_variables/" + resource_name + "_environment_variables.txt"
                        resource_info_map[resource_name] = resource_info

                if "Type" in state and state["Type"] == "Parallel":
                    branches = state['Branches']
                    for branch in branches:
                        resource_info_map = self._get_resource_info_map(
                            branch, resource_info_map)

                if "Type" in state and state["Type"] == "Map":
                    branch = state['Iterator']
                    #print(str(branch))
                    resource_info_map = self._get_resource_info_map(
                        branch, resource_info_map)
                    #print(str(resource_info_map))

        else:
            print("ERROR: invalid workflow description.")
            assert False

        return resource_info_map

    def _delete_resource_if_existing(self, existing_resources, resource_name):
        for g in existing_resources:
            if g.name == resource_name:
                self._client.delete_function(g)
                print("deleted resource: " + resource_name)
                break

    def _create_and_upload_resource(self, resource_name, resource_info):
        print("Deploying resource: " + resource_name)

        resource_filename = resource_info["resource_filename"]
        is_zip = resource_info["is_zip"]
        is_jar = resource_info["is_jar"]
        resource_req_filename = resource_info["resource_req_filename"]
        resource_env_filename = resource_info["resource_env_filename"]
        resource_runtime = resource_info["resource_runtime"]

        self._workflow_resources.append(resource_name)

        try:
            # add the resource
            g = self._client.add_function(resource_name,
                                          runtime=resource_runtime)

            # upload the resource source
            print('Uploading file: ' + resource_filename)
            if is_zip or is_jar:
                g.upload(resource_filename)
            else:
                source_text = ''
                with open(resource_filename, 'r') as f:
                    source_text = f.read()
                g.source = {"code": source_text}

            # upload the resource requirements
            if os.path.isfile(resource_req_filename):
                with open(resource_req_filename, "r") as f:
                    reqs = f.read().strip()
                    g.requirements = reqs
                    #print("set requirements for function: " + resource_name + " " + reqs)

            # resource environment variables
            # upload the resource environment variables
            if os.path.isfile(resource_env_filename):
                with open(resource_env_filename, "r") as f:
                    env_vars = f.read().strip()
                    g.environment_variables = env_vars
                    #print("set environment variables for function: " + resource_name + " " + env_vars)

        except Exception as e:
            print("ERROR: Could not create resource.")
            print(str(e))
            assert False

    def upload_workflow(self):
        self.undeploy_workflow()

        resource_info_map = self._get_resource_info_map()

        existing_resources = self._client.functions

        for resource_name in resource_info_map.keys():
            self._delete_resource_if_existing(existing_resources,
                                              resource_name)

            resource_info = resource_info_map[resource_name]

            self._create_and_upload_resource(resource_name, resource_info)

    def get_deployment_error(self):
        return self._deployment_error

    def deploy_workflow(self):
        try:
            wf = self._client.add_workflow(self._workflow_name)
            wf.json = json.dumps(self._workflow_description)
            wf.deploy(self._settings["timeout"])
            self._workflow = wf
            if self._workflow.status != "failed":
                print("MFN workflow " + self._workflow_name + " deployed.")
            else:
                print("MFN workflow " + self._workflow_name +
                      " could not be deployed.")
                self._deployment_error = self._workflow.get_deployment_error()
        except Exception as e:
            print("ERROR: Could not deploy workflow.")
            raise e

    def undeploy_workflow(self):
        existing_workflows = self._client.workflows
        for wf in existing_workflows:
            if wf.name == self._workflow_name:
                if wf.status == "deployed":
                    wf.undeploy(self._settings["timeout"])
                    print("Workflow undeployed.")
                self._client.delete_workflow(wf)
                break

        existing_resources = self._client.functions

        for resource_name in self._workflow_resources:
            self._delete_resource_if_existing(existing_resources,
                                              resource_name)

    def get_test_workflow_endpoints(self):
        if self._workflow.status == "deployed":
            return self._workflow.endpoints

    def execute(self,
                message,
                timeout=None,
                check_duration=False,
                async_=False):
        # renamed from 'async', which is a reserved word since Python 3.7
        if timeout is None:
            timeout = self._settings["timeout"]
        if async_:
            return self._workflow.execute_async(message, timeout)
        else:
            return self._workflow.execute(message, timeout, check_duration)
Example #11
#   Copyright 2020 The KNIX Authors
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

import base64
from mfn_sdk import MfnClient

# user and password are masked placeholders from the source listing;
# proxies must be a dict, not a set containing an empty string
c = MfnClient('https://knix.io/mfn', '*****@*****.**', 'test123', proxies={})

print("Retrieving all objects")
for key in list(c.keys()):
    print(key, c.get(key))
Example #12
#   See the License for the specific language governing permissions and
#   limitations under the License.
"""
  trigger: a script that sets up a triggerable bucket and a function and workflow
  
  The workflow is associated with the triggerable bucket.
  Upon writing to the triggerable bucket, the workflow is executed.
  The function then writes the data to the general storage.
  The script tries to retrieve the data from the general storage.
"""
import base64
import time

from mfn_sdk import MfnClient

c = MfnClient()

function = c.add_function("transform")
function.code = """
def handle(event, context):
    context.log("Triggered "+str(event))

    if 'key' in event and 'value' in event:
      context.put(event['key'], event['value'])

    return None
"""

workflow = c.add_workflow("workflow")
workflow.json = """{
  "name": "workflow",
Example #13
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
"""
  deploy: a script that shows a workflow being undeployed and deployed again
"""
import time
import logging
from mfn_sdk import MfnClient

c = MfnClient()

logging.basicConfig(level=logging.DEBUG)

workflow_name = "echo_wf"

wf = None
for w in c.workflows:
    if w.name == workflow_name:
        wf = w
        break

if wf is None:
    raise SystemExit("Workflow '" + workflow_name + "' not found")

# Just an example of undeploying
print("Workflow", wf.name, "is going to be undeployed")

wf.undeploy()
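
# The listing ends after the undeploy; the redeploy promised by the docstring
# presumably mirrors the deploy(timeout) call used in Example #10:
time.sleep(5)
wf.deploy(60)
print("Workflow", wf.name, "deployed again")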
Example #14
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
"""
  clear: a script that DELETES ALL workflows, functions and objects of a 
          MicroFunctions account
"""
import getpass
import sys

from mfn_sdk import MfnClient

c = MfnClient()

print("URL:  ", c.url)
print("USER: "******"THIS CLEARS ALL FUNCTIONS, WORKFLOWS AND DATA IN YOUR ACCOUNT")
if not input("Are you sure? (y/N): ").lower().strip()[:1] == "y": sys.exit(1)

for w in c.workflows:
    print("Deleting workflow", w.name)
    c.delete_workflow(w)
for g in c.functions:
    print("Deleting function", g.name)
    c.delete_function(g)
for k in list(c.keys()):
    print("Deleting object", k)
    c.delete(k)
Example #15
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
"""
  transfer: a script that transfers all functions, workflows and objects from
            an existing account at one microfunctions platform to another
"""
from mfn_sdk import MfnClient

import logging
logging.basicConfig(level=logging.DEBUG)

# The account to read from (tries to find default settings)
c1 = MfnClient()

# The account to write to
c2 = MfnClient.load_json(filename="settings_target.json")

print("Copying all contents of")
print("User", c1.user, "at microfunctions", c1.url)
print(" TO")
print("User", c2.user, "at microfunctions", c2.url)

for fn1 in c1.functions:
    print("Syncing function", fn1.name)
    fn2 = c2.add_function(fn1.name, fn1.runtime)
    s = fn1.source
    if 'zip' in s:
        print("Function ", fn1.name, "has type zip with", str(len(s['zip'])),
Example #16
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

import time
import logging
from mfn_sdk import MfnClient

c = MfnClient()

logging.basicConfig(level=logging.DEBUG)

fn = c.add_function("echo")
fn.source = {
    'code':
    """
def handle(event, context):
    context.log("Echoing event: "+str(event))
    return event
"""
}

workflow = c.add_workflow("echo_wf")
workflow.json = """{
Example #17
class MFNTest():
    def __init__(self, test_name=None, timeout=None, workflow_filename=None, new_user=False):

        self._settings = self._get_settings()

        if new_user:
            random_str = str(random.randint(0, 10000)) + "_" + str(time.time())
            random_user = hashlib.sha256(random_str.encode()).hexdigest()
            random_user = "******" + random_user[0:31] + "@knix.io"
            print("User: "******"User: "******"mfn_user"])
            self._client = MfnClient()

        if workflow_filename is None:
            self._workflow_filename = self._settings["workflow_description_file"]
        else:
            self._workflow_filename = workflow_filename

        ind = self._workflow_filename.rfind("/")
        if ind != -1:
            self._workflow_folder = self._workflow_filename[:ind+1]
        else:
            self._workflow_folder = "./"
        print("Workflow folder: " + self._workflow_folder)

        self._workflow_description = self._get_json_file(self._workflow_filename)

        if "name" in self._workflow_description:
            self._workflow_name = self._workflow_description["name"]
        else:
            self._workflow_name = self._workflow_filename[0:self._workflow_filename.rfind(".")]

        if test_name is not None:
            self._test_name = test_name
        else:
            self._test_name = self._workflow_filename

        if timeout is not None:
            self._settings["timeout"] = timeout

        self._log_clear_timestamp = int(time.time() * 1000.0 * 1000.0)

        # will be the deployed workflow object in self._client
        self._workflow = None
        self._deployment_error = ""

        self._workflow_resources = []

        self.upload_workflow()
        self.deploy_workflow()

    def _get_json_file(self, filename):
        json_data = {}
        if os.path.isfile(filename):
            with open(filename) as json_file:
                json_data = json.load(json_file)
        return json_data

    def _get_settings(self):
        settings = {}
        # read default global settings files
        settings.update(self._get_json_file("../settings.json"))

        # read test specific settings
        settings.update(self._get_json_file("settings.json"))

        if len(settings) == 0:
            raise Exception("Empty settings")

        return settings

    def _get_resource_info(self, resource_ref):
        #dir_list = next(os.walk('.'))[1]
        dir_list = next(os.walk(self._workflow_folder))[1]
        is_zip = False
        is_jar = False
        runtime = ""
        found = False
        if "zips" in dir_list:
            resource_filename = self._workflow_folder + "zips/" + resource_ref + ".zip"
            if os.path.isfile(resource_filename):
                found = True
                runtime = "Python 3.6"
                is_zip = True

        if not found:
            if "python" in dir_list:
                resource_filename = self._workflow_folder + "python/" + resource_ref + ".py"
                if os.path.isfile(resource_filename):
                    found = True
                    runtime = "Python 3.6"
            else:
                resource_filename = self._workflow_folder + resource_ref + ".py"
                if os.path.isfile(resource_filename):
                    found = True
                    runtime = "Python 3.6"

        if not found and "jars" in dir_list:
            resource_filename = self._workflow_folder + "jars/" + resource_ref + ".jar"
            if os.path.isfile(resource_filename):
                found = True
                runtime = "Java"
                is_jar = True

        if not found:
            if "java" in dir_list:
                resource_filename = self._workflow_folder + "java/" + resource_ref + ".java"
                if os.path.isfile(resource_filename):
                    found = True
                    runtime = "Java"
            else:
                resource_filename = self._workflow_folder + resource_ref + ".java"
                if os.path.isfile(resource_filename):
                    found = True
                    runtime = "Java"

        retval = {}
        retval["resource_filename"] = resource_filename
        retval["resource_runtime"] = runtime
        retval["is_zip"] = is_zip
        retval["is_jar"] = is_jar
        return retval

    def _get_resource_info_map(self, workflow_description=None, resource_info_map=None):
        if workflow_description is None:
            workflow_description = self._workflow_description
        if resource_info_map is None:
            resource_info_map = {}

        if "functions" in self._workflow_description:
            workflow_functions = workflow_description["functions"]
            for wf_function in workflow_functions:
                if "name" in wf_function:
                    resource_name = wf_function["name"]
                    resource_ref = resource_name
                    if "resource" in wf_function:
                        resource_ref = wf_function["resource"]

                    if resource_ref not in resource_info_map.keys():
                        resource_info = self._get_resource_info(resource_ref)
                        resource_info["resource_req_filename"] = "requirements/" + resource_ref + "_requirements.txt"
                        resource_info["resource_env_filename"] = "environment_variables/" + resource_ref + "_environment_variables.txt"
                        resource_info_map[resource_ref] = resource_info

        elif "States" in workflow_description:
            states = workflow_description["States"]
            for sname in states:
                state = states[sname]
                if "Resource" in state:
                    resource_name = state["Resource"]

                    if resource_name not in resource_info_map.keys():
                        resource_info = self._get_resource_info(resource_name)
                        resource_info["resource_req_filename"] = "requirements/" + resource_name + "_requirements.txt"
                        resource_info["resource_env_filename"] = "environment_variables/" + resource_name + "_environment_variables.txt"
                        resource_info_map[resource_name] = resource_info

                if "Type" in state and state["Type"] == "Parallel":
                    branches = state['Branches']
                    for branch in branches:
                        resource_info_map = self._get_resource_info_map(branch, resource_info_map)

                if "Type" in state and state["Type"] == "Map":
                    branch = state['Iterator']
                    #print(str(branch))
                    resource_info_map = self._get_resource_info_map(branch, resource_info_map)
                    #print(str(resource_info_map))

        else:
            print("ERROR: invalid workflow description.")
            assert False

        return resource_info_map

    def _delete_resource_if_existing(self, existing_resources, resource_name):
        for g in existing_resources:
            if g.name == resource_name:
                self._client.delete_function(g)
                print("deleted resource: " + resource_name)
                break

    def _create_and_upload_resource(self, resource_name, resource_info):
        print("Deploying resource: " + resource_name)

        resource_filename = resource_info["resource_filename"]
        is_zip = resource_info["is_zip"]
        is_jar = resource_info["is_jar"]
        resource_req_filename = resource_info["resource_req_filename"]
        resource_env_filename = resource_info["resource_env_filename"]
        resource_runtime = resource_info["resource_runtime"]

        self._workflow_resources.append(resource_name)

        try:
            # add the resource
            g = self._client.add_function(resource_name, runtime=resource_runtime)

            # upload the resource source
            print('Uploading file: ' + resource_filename)
            if is_zip or is_jar:
                g.upload(resource_filename)
            else:
                source_text = ''
                with open(resource_filename, 'r') as f:
                    source_text = f.read()
                g.source = {"code": source_text}

            # upload the resource requirements
            if os.path.isfile(resource_req_filename):
                with open(resource_req_filename, "r") as f:
                    reqs = f.read().strip()
                    g.requirements = reqs
                    #print("set requirements for function: " + resource_name + " " + reqs)

            # resource environment variables
            # upload the resource environment variables
            if os.path.isfile(resource_env_filename):
                with open(resource_env_filename, "r") as f:
                    env_vars = f.read().strip()
                    g.environment_variables = env_vars
                    #print("set environment variables for function: " + resource_name + " " + env_vars)

        except Exception as e:
            print("ERROR: Could not create resource.")
            print(str(e))
            assert False

    def upload_workflow(self):
        self.undeploy_workflow()

        resource_info_map = self._get_resource_info_map()

        existing_resources = self._client.functions

        for resource_name in resource_info_map.keys():
            self._delete_resource_if_existing(existing_resources, resource_name)

            resource_info = resource_info_map[resource_name]

            self._create_and_upload_resource(resource_name, resource_info)

    def get_deployment_error(self):
        return self._deployment_error

    def deploy_workflow(self):
        try:
            wf = self._client.add_workflow(self._workflow_name)
            wf.json = json.dumps(self._workflow_description)
            wf.deploy(self._settings["timeout"])
            self._workflow = wf
            if self._workflow.status != "failed":
                print("MFN workflow " + self._workflow_name + " deployed.")
            else:
                print("MFN workflow " + self._workflow_name + " could not be deployed.")
                self._deployment_error = self._workflow.get_deployment_error()
        except Exception as e:
            print("ERROR: Could not deploy workflow.")
            raise e

    def undeploy_workflow(self):
        existing_workflows = self._client.workflows
        for wf in existing_workflows:
            if wf.name == self._workflow_name:
                if wf.status == "deployed":
                    wf.undeploy(self._settings["timeout"])
                    print("Workflow undeployed.")
                self._client.delete_workflow(wf)
                break

        existing_resources = self._client.functions

        for resource_name in self._workflow_resources:
            self._delete_resource_if_existing(existing_resources, resource_name)

        self._client.disconnect()

    def get_test_workflow_endpoints(self):
        if self._workflow.status == "deployed":
            return self._workflow.endpoints

    def execute(self, message, timeout=None, check_duration=False):
        if timeout is None:
            timeout = self._settings["timeout"]
        return self._workflow.execute(message, timeout, check_duration)

    def get_workflow_logs(self, num_lines=500):
        data = self._workflow.logs(ts_earliest=self._log_clear_timestamp, num_lines=num_lines)
        return data

    def clear_workflow_logs(self):
        self._log_clear_timestamp = int(time.time() * 1000.0 * 1000.0)

    def report(self, success, inp, expected, actual):
        short_inp = self._get_printable(inp)

        if success:
            print(self._test_name + " test " + mfntestpassed + " with input data:", short_inp)
        else:
            print(self._test_name + " test " + mfntestfailed + " with input data:", short_inp + '(result: ' + json.dumps(actual) + ', expected: ' + json.dumps(expected) + ')')

    def exec_only(self, inp):
        any_failed_tests = False
        try:
            rn = self.execute(json.loads(inp))
            return rn
        except Exception as e:
            any_failed_tests = True
            self.undeploy_workflow()
            print(str(e))
            raise e
        finally:
            time.sleep(2)
            if any_failed_tests:
                self._print_logs(self._workflow.logs())

    def exec_tests(self, testtuplelist, check_just_keys=False, check_duration=False, should_undeploy=True):
        any_failed_tests = False
        durations = []

        time.sleep(2)

        try:
            for tup in testtuplelist:
                current_test_passed = False
                inp, res = tup
                if check_duration:
                    rn, t_total = self.execute(json.loads(inp), check_duration=check_duration)
                else:
                    rn = self.execute(json.loads(inp))

                if check_duration:
                    durations.append(t_total)
                    #print("Total time to execute: " + str(t_total) + " (ms)")

                if check_just_keys:
                    if set(rn.keys()) == set(res.keys()):
                        current_test_passed = True
                else:
                    if rn == json.loads(res):
                        current_test_passed = True

                self.report(current_test_passed, inp, res, rn)
                any_failed_tests = any_failed_tests or (not current_test_passed)

                time.sleep(1)

        except Exception as e:
            print(str(e))
            raise e
        finally:
            time.sleep(2)
            if check_duration:
                print("------")
                print("Request/response latency statistics:")
                print("Number of executions: " + str(len(durations)))
                print("Average (ms): " + str(statistics.mean(durations)))
                print("Median (ms): " + str(statistics.median(durations)))
                print("Minimum (ms): " + str(min(durations)))
                print("Maximum (ms): " + str(max(durations)))
                print("Stdev (ms): " + str(statistics.stdev(durations)))
                print("PStdev (ms): " + str(statistics.pstdev(durations)))
                percentiles = [0.0, 50.0, 90.0, 95.0, 99.0, 99.9, 99.99, 100.0]
                self.print_percentiles(durations, percentiles)
                print("------")
            if any_failed_tests:
                self._print_logs(self._workflow.logs())
            if should_undeploy:
                self.undeploy_workflow()

    def _print_logs(self, logs):
        print(logs)
        for t in logs:
            if t == "timestamp":
                continue
            cur_log = logs[t]
            lines = cur_log.split("\n")
            for line in lines:
                print(line)
            print("------")

    def print_percentiles(self, data, percentiles):
        data.sort()
        for perc in percentiles:
            print(str(perc) + "th percentile (ms): " + str(self.percentile(data, perc/100.0)))

    def percentile(self, data, percent):
        k = (len(data)-1) * percent
        f = math.floor(k)
        c = math.ceil(k)
        if f == c:
            return data[int(k)]
        d0 = data[int(f)] * (c-k)
        d1 = data[int(c)] * (k-f)
        return d0 + d1
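
    # Worked example: percentile([1, 2, 3, 4], 0.5) gives k = 1.5, f = 1, c = 2,
    # so the result is 2 * (2 - 1.5) + 3 * (1.5 - 1) = 2.5, i.e. linear
    # interpolation between the two middle values.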

    def _get_printable(self, text, max_len=50):
        if len(text) > max_len:
            return text[:max_len] + " ... (showing " + str(max_len) + "/" + str(len(text)) + " characters.)"
        return text

    def plot_latency_breakdown(self, num_last_executions=15):
        eidlist = self.extract_execution_ids(num_last_executions)
        eid_filename = "eidlist_" + self._test_name + ".txt"
        timestamps_filename = "timestamps_" + self._test_name + ".txt"
        eidlist = eidlist[len(eidlist) - num_last_executions:]
        with open(eid_filename, "w") as f:
            for eid in eidlist:
                f.write(eid + "\n")

        self.parse_metrics(eid_filename, timestamps_filename)

        cmd = "python3 ../plotmfnmetrics.py " + timestamps_filename
        output, error = run_command_return_output(cmd)

        # cleanup
        cmd = "rm esresult.json " + eid_filename + " " + timestamps_filename
        _, _ = run_command_return_output(cmd)

    def parse_metrics(self, eid_filename, timestamps_filename):
        cmd = "python3 ../mfnmetrics.py -eidfile " + eid_filename
        output, error = run_command_return_output(cmd)
        log_lines = combine_output(output, error)
        with open(timestamps_filename, "w") as f:
            for line in log_lines:
                f.write(line + "\n")

    def extract_execution_ids(self, num_last_executions, num_log_lines=2000):
        cmd = "python3 ../wftail.py -n " + str(num_log_lines) + " -wname " + self._workflow_name
        output, error = run_command_return_output(cmd)
        log_lines = combine_output(output, error)
        eidlist = []
        for line in log_lines:
            line = line.strip()
            if line == "":
                continue
            tokens = line.split(" ")
            eid = tokens[7]
            if eid != "[0l]":
                eid = eid[1:-1]
                eidlist.append(eid)
                #print(eid)

        return eidlist

    def exec_keys_check(self, testtuplelist):
        self.exec_tests(testtuplelist, check_just_keys=True)

    # compatibility with older tests
    def cleanup(self):
        return
Example #18
#   See the License for the specific language governing permissions and
#   limitations under the License.
"""
  trigger: a script that sets up a triggerable bucket and a function and workflow
  
  The workflow is associated with the triggerable bucket.
  Upon writing to the triggerable bucket, the workflow is executed.
  The function then writes the data to the general storage.
  The script tries to retrieve the data from the general storage.
"""
import base64
import time

from mfn_sdk import MfnClient

c = MfnClient()

function = c.add_function("react")
function.code = """
def handle(event, context):
    context.log("Triggered "+str(event))

    return None
"""

workflow = c.add_workflow("eventdriven_workflow")
workflow.json = """{
  "name": "eventdriven_workflow",
  "entry": "react",
  "functions": [
    {
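      "name": "react"
    }
  ]
}"""
# The listing is cut off here; the completion above is an assumption based on
# the "entry": "react" field already visible. The bucket setup and trigger
# steps described in the docstring are not shown in this listing.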