Example #1
import sys
import random
from time import time, sleep
from subprocess import Popen

# `Workload`, `readReceivers`, and the parsed `args` come from the enclosing script.

def main():
    """Create flows."""
    sys.stdout.flush()

    # Initialize workload
    workload = Workload(args.workload)
    receivers = readReceivers()
    print("NUM RECEIVERS: %d" % len(receivers))

    flowStartCmd = "sudo python ./flowGenerator.py --src-ip %s --src-port %d --dest-ip %s --dest-port %d --num-packets %d --num-bands %d --max-packets %d --packet-size %d --out %s/send-%s-%d.txt > test.txt"

    #random.seed(1234568)
    print("STARTING AT TIME %f" % time())
    srcPort = 5000
    start = time()
    while time() - start < args.time:
        # Poisson arrivals: the flow rate is chosen so the offered traffic
        # equals the requested load fraction of the link bandwidth (bw in Mbit/s)
        lambd = args.load * args.bw * 1000000 / 8 / args.packet_size / workload.getAverageFlowSize()
        waitTime = random.expovariate(lambd)
        print("Sleeping for %f seconds..." % waitTime)
        sys.stdout.flush()
        sleep(waitTime)

        # Pick a random receiver
        i = random.randrange(len(receivers))
        (dest_ip, dest_port) = receivers[i]
        numPackets = workload.getFlowSize()

        print("Sending %d packets from %s:%d to %s:%d" % (numPackets, args.src_ip, srcPort, dest_ip, dest_port))
        Popen(flowStartCmd % (args.src_ip, srcPort, dest_ip, dest_port, numPackets, args.num_bands,
                              workload.getMaxFlowSize(), args.packet_size, args.output_dir, args.src_ip, srcPort), shell=True)
        srcPort += 1

    print("ENDING AT TIME %f" % time())
Example #2
def test_edit_workload():
    # Workloads to compare against
    workload1 = Workload("test_workload(I_v3)", "", "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1", 4000, 0.01, 10, 10, 10, 100, -1, 100, False, 0, 0, 0, "")
    workload2 = Workload("Small_Workload", "", "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1", 200, 0.01, 10, 10, 10, 100, -1, 100, False, 0, 0, 0, "")
    # Call get_workloads 
    WL_data = aixprt.get_workloads()
    assert len(WL_data) == 2

    # Check that both workloads exist in the system
    assert compare_workloads(WL_data["test_workload(I_v3)"], workload1)
    assert compare_workloads(WL_data["Small_Workload"], workload2)

    # Now edit the first workload
    aixprt.edit_workload("test_workload(I_v3)", "workload_edited", "edited", "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1", "5000", "0.01", "10", "10", "20", "100", "-2", "100", "True", "0", "0", "0", "")
    workload1_edited = Workload("workload_edited", "edited", "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1", 5000, 0.01, 10, 10, 20, 100, -2, 100, True, 0, 0, 0, "")
    
    # Ensure it was edited properly
    WL_data = aixprt.get_workloads()
    assert len(WL_data) == 2
    assert not "test_workload(I_v3)" in WL_data
    assert "workload_edited" in WL_data
    assert compare_workloads(WL_data["workload_edited"], workload1_edited)
    
    # Now try to edit a workload to have all invalid parameters (with the exception of name, comment, and command)
    actual_invalid = aixprt.edit_workload("Small_Workload", "invalidWL", "", '', "cat", "0.0.0.0", "1.0", "False", "1.0", "asdga100", "--2", "1%00", "FTrue", "z", "e", "ro", "")
    expected_invalid = ["tfhub_model", "training_steps", "learning_rate", "testing_percentage", "validation_percentage", "eval_step_interval", "train_batch_size", "test_batch_size", "validation_batch_size", "flip_left_right", "random_crop", "random_scale", "random_brightness"]
    WL_data = aixprt.get_workloads()
    assert len(WL_data) == 2
    assert len(expected_invalid) == 13
    assert expected_invalid == actual_invalid
    
    # Ensure that the workload was not edited
    assert compare_workloads(WL_data["Small_Workload"], workload2)
Example #3
def test_remove_workload():
    expected_workload1 = Workload("test_workload(I_v3)", "", "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1", 4000, 0.01, 10, 10, 10, 100, -1, 100, False, 0, 0, 0, "")
    expected_workload2 = Workload("Small_Workload", "", "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1", 200, 0.01, 10, 10, 10, 100, -1, 100, False, 0, 0, 0, "")
    expected_workload3 = Workload("Smaller_Workload", "", "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1", 100, 0.01, 10, 10, 10, 100, -1, 100, False, 0, 0, 0, "")

    # Ensure file is setup properly
    WL_data = aixprt.get_workloads()
    assert len(WL_data) == 2
    assert "test_workload(I_v3)" in WL_data
    assert "Small_Workload" in WL_data
    
    # Try to remove a non-existent workload
    assert aixprt.remove_workload("Not_in_file") == 0
    assert len(WL_data) == 2
    assert "test_workload(I_v3)" in WL_data
    assert "Small_Workload" in WL_data
    
    # Remove an existing workload
    actual_workload1 = aixprt.remove_workload("test_workload(I_v3)")
    WL_data = aixprt.get_workloads()
    assert len(WL_data) == 1
    assert not "test_workload(I_v3)" in WL_data 
    assert compare_workloads(expected_workload1, actual_workload1)

    # Remove an existing workload that is used in a suite
    suite1 = aixprt.get_suite("Small_Suite")
    assert len(suite1) == 2
    actual_workload2 = aixprt.remove_workload("Small_Workload")
    WL_data = aixprt.get_workloads()
    assert len(WL_data) == 0
    assert not "Small_Workload" in WL_data 
    assert compare_workloads(expected_workload2, actual_workload2)
    suite1 = aixprt.get_suite("Small_Suite")
    assert len(suite1) == 1
    assert not "Small_Workload" in suite1 

    # Remove a workload that occurs multiple times in multiple suites
    # Setup for this particular test:
    aixprt.add_workload("test_workload(I_v3)", "", 'https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1', "4100", "0.01", "10", "10", "10", "100", "-1", "100", "False", "0", "0", "0", "")
    aixprt.add_workload("Small_Workload", "", 'https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1', "200", "0.01", "10", "10", "10", "100", "-1", "100", "False", "0", "0", "0", "")
    aixprt.add_workload("Smaller_Workload", "", 'https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1', "100", "0.01", "10", "10", "10", "100", "-1", "100", "False", "0", "0", "0", "")
    WL_data = aixprt.get_workloads()
    assert len(WL_data) == 3
    suite2_A = aixprt.get_suite("Small_Suite")
    assert len(suite2_A) == 1
    suite2_B = aixprt.get_suite("Smaller_Suite")
    assert len(suite2_B) == 2
    # Now remove 
    actual_workload3 = aixprt.remove_workload("Smaller_Workload")
    WL_data = aixprt.get_workloads()
    assert len(WL_data) == 2
    assert not "Smaller_Workload" in WL_data
    assert compare_workloads(expected_workload3, actual_workload3)
    suite2_A = aixprt.get_suite("Small_Suite")
    assert len(suite2_A) == 0
    assert not "Smaller_Workload" in suite2_A
    suite2_B = aixprt.get_suite("Smaller_Suite")
    assert len(suite2_B) == 0
    assert not "Smaller_Workload" in suite2_B
Example #4
@pytest.fixture
def workload():
    """
    A fixture for a Workload.
    """
    _workload = Workload()
    yield _workload
    # Teardown: stop the Workload once the test completes
    _workload.terminate()
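Assuming the pytest decorator shown above (the original snippet omits it), a test receives the live Workload by naming the fixture as a parameter, and the terminate() teardown runs automatically afterwards:

def test_workload_is_created(workload):
    # pytest injects the object yielded by the fixture
    assert workload is not None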
Example #5
 def generate_query(self):
     if 'workload' in self.scheme:
         rel_list = [(x.source, x.target, x.label)
                     for x in self.relation_ins]
         wl = Workload(list(self.node_labels), rel_list, self.workload)
         queries = wl.generate_workload()
         filename = 'query.cypher'
         tl = Translate(filename)
         tl.translate(queries)
Example #6
def test_get_workloads():
    workload1 = Workload("test_workload(I_v3)", "", "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1", 4000, 0.01, 10, 10, 10, 100, -1, 100, False, 0, 0, 0, "")
    workload2 = Workload("Small_Workload", "", "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1", 200, 0.01, 10, 10, 10, 100, -1, 100, False, 0, 0, 0, "")
    # Call get_workloads 
    WL_data = aixprt.get_workloads()
    # Check that there are only 2 workloads in the list and ensure both are correct
    assert len(WL_data) == 2
    assert "test_workload(I_v3)" in WL_data
    assert "Small_Workload" in WL_data
    assert compare_workloads(WL_data["test_workload(I_v3)"], workload1)
    assert compare_workloads(WL_data["Small_Workload"], workload2)
Example #7
def main(names, component_id=1):
    count = len(names)
    pp.figure(figsize=(14, 2 * count), facecolor='w', edgecolor='k')
    for i in range(count):
        data = Workload.aggregate(Workload.locate(names[i]), 'dynamic_power')
        step_count = data.shape[1]
        time = constant.TIME_STEP * np.arange(step_count)
        pp.subplot(count, 1, i + 1)
        pp.plot(time, data[component_id, :])
        pp.xlim([time[0], time[-1]])
        pp.xlabel('Time (s)')
        pp.ylabel('Power (W)')
    pp.show()
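This and the related plotting examples below (#8 and #14) omit their preamble; a likely set of imports, where the module paths are assumptions:

import sys

import numpy as np
import matplotlib.pyplot as pp

import constant                  # assumed project module exposing TIME_STEP
from workload import Workload    # assumed location of the Workload helpers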
Example #8
def process(name):
    data = Workload.aggregate(Workload.locate(name), 'dynamic_power')
    component_count, step_count = data.shape
    time = constant.TIME_STEP * np.arange(0, step_count)
    pp.figure(figsize=(14, 2 * component_count), facecolor='w', edgecolor='k')
    for i in range(component_count):
        pp.subplot(component_count, 1, i + 1)
        pp.plot(time, data[i, :])
        pp.xlim([time[0], time[-1]])
        pp.ylabel('Power (W)')
        if i == 0:
            pp.title(name)
    pp.xlabel('Time (s)')
Example #9
def get_workloads():
    """
    Retrieves the current workloads in the workloads.json file and returns them as a dictionary of paired
    workload names and Workload objects

    :return: A dictionary with each workload name being used as keys to access the related workload objects
    """
    data = {}
    json_path = Path("workloads.json")
    if (not json_path.is_file()):
        return data

    data = load_workloads()
    workloads_dict = {}

    for x in data['workloads']:
        wl = Workload(x['name'], x['comment'], x['tfhub model'],
                      x['training steps'], x['learning rate'],
                      x['testing percentage'], x['validation percentage'],
                      x['eval step interval'], x['train batch size'],
                      x['test batch size'], x['validation batch size'],
                      x['flip left/right'], x['random crop'],
                      x['random scale'], x['random brightness'], x['command'])
        workloads_dict[x['name']] = wl

    return workloads_dict
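load_workloads itself is not shown; given the structure that add_workload writes back (see below), a minimal sketch:

import json

def load_workloads():
    # workloads.json holds {"workloads": [{...}, ...]}, as written by add_workload
    with open("workloads.json", "r") as json_file:
        return json.load(json_file)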
Example #10
    def generateWorkload(self):
        W = []
        logging.basicConfig(filename='example.log', level=logging.DEBUG)
        creation_Time = 0
        i = 0
        TotalCPUTime = np.random.normal(self.mean_job_time, self.sd_job_time)
        TotalCPUTime = round(TotalCPUTime * self.time_unit)
        logging.info('Task' + str(i) + ":Creation Time:" + str(creation_Time) +
                     ":Total CPU time:" + str(TotalCPUTime))
        IO = IOGenerator(self.rate_io, TotalCPUTime, self.mean_io_time,
                         self.sd_io_time, self.time_unit)
        iolist = IOList(IO.generateIOEvent())
        T = Task("Task 0", 0, np.abs(creation_Time), np.abs(TotalCPUTime),
                 iolist)
        W.append(T)

        for i in range(self.num_jobs):
            creation_Time = round(
                random.expovariate(self.rate_jobs) *
                self.time_unit) + W[-1].creationTime
            TotalCPUTime = np.random.normal(self.mean_job_time,
                                            self.sd_job_time)
            TotalCPUTime = round(TotalCPUTime * self.time_unit)
            logging.info('Task' + str(i) + ":Creation Time:" +
                         str(creation_Time) + ":Total CPU time:" +
                         str(TotalCPUTime))
            IO = IOGenerator(self.rate_io, TotalCPUTime, self.mean_io_time,
                             self.sd_io_time, self.time_unit)
            iolist = IOList(IO.generateIOEvent())
            T = Task("Task " + str(i), 0, np.abs(creation_Time),
                     np.abs(TotalCPUTime), iolist)
            W.append(T)
        self.work_load_list = W
        w = Workload(W)
        return w
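generateWorkload draws interarrival gaps with random.expovariate, which makes task creation a Poisson process. A quick standalone check of that building block:

import random

random.seed(0)
rate_jobs = 2.0  # two arrivals per time unit on average
gaps = [random.expovariate(rate_jobs) for _ in range(10000)]
print(sum(gaps) / len(gaps))  # tends toward 1 / rate_jobs = 0.5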
Example #11
 def create_workload(self, name, image="busybox", network=None):
     """
     Create a workload container inside this host container.
     """
     workload = Workload(self, name, image=image, network=network)
     self.workloads.add(workload)
     return workload
Example #12
 def create_workload(self, name, image="busybox", network="bridge", ip=None, labels=None):
     """
     Create a workload container inside this host container.
     """
     # Use None as the default to avoid sharing one mutable list across calls
     labels = labels if labels is not None else []
     workload = Workload(self, name, image=image, network=network, ip=ip, labels=labels)
     self.workloads.add(workload)
     return workload
Example #13
def remove_workload(workload_name):
    """
    Removes a workload from the workloads.json

    :param workload_name: name of the workload being removed\n
    :return: the removed workload, if it was successfully removed from the list; else, it returns 0\n
    """
    # Open workloads.json into data
    json_path = Path("workloads.json")
    if not json_path.is_file():
        # Nothing to remove; match the documented failure value
        return 0
    data = load_workloads()

    removed_WL = {}
    # Traverse the list of workloads until a matching name is found,
    # then remove that workload from the list and save it to removed_WL
    for index, x in enumerate(data['workloads']):
        if x['name'] == workload_name:
            removed_WL = data['workloads'].pop(index)
            break
    # If a workload was removed, then update the json and return the removed workload
    if removed_WL:
        with open('workloads.json', 'w') as json_file:
            json.dump(data, json_file, indent=4)

        # Check whether that workload was in any suites
        suite_appearances = is_workload_in_suites(workload_name)
        # If so, remove each appearance, iterating from the back so the
        # recorded positions stay valid
        if suite_appearances != 1:
            for appearance in reversed(suite_appearances):
                remove_workload_from_suite(appearance[1], appearance[0])

        # Create an actual workload object with removed_WL
        removed_WL = Workload(
            removed_WL['name'], removed_WL['comment'],
            removed_WL['tfhub model'], removed_WL['training steps'],
            removed_WL['learning rate'], removed_WL['testing percentage'],
            removed_WL['validation percentage'],
            removed_WL['eval step interval'], removed_WL['train batch size'],
            removed_WL['test batch size'], removed_WL['validation batch size'],
            removed_WL['flip left/right'], removed_WL['random crop'],
            removed_WL['random scale'], removed_WL['random brightness'],
            removed_WL['command'])
        return removed_WL
    # Else, return 0
    return 0
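is_workload_in_suites is not shown; the call sites above suggest it returns 1 when the workload appears in no suite, and otherwise a list of pairs whose second element identifies the suite. A sketch under that assumption, where get_suites is a hypothetical accessor returning {suite_name: [workload_name, ...]}:

def is_workload_in_suites(workload_name):
    # Hypothetical contract: remove_workload above passes appearance[1]
    # (the suite) as the first argument of remove_workload_from_suite
    appearances = [(wl, suite_name)
                   for suite_name, members in get_suites().items()
                   for wl in members if wl == workload_name]
    return appearances if appearances else 1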
Example #14
def main(names, draw=True):
    paths = []
    for name in names:
        paths.extend(Workload.enumerate(name))
    print('%-20s %10s %10s' % ('Benchmark', 'Components', 'Time, s'))
    count = len(paths)
    cores = np.zeros([count, 1])
    cache = np.zeros([count, 1])
    for i in range(count):
        name = '/'.join(paths[i].split('/')[-2:]).replace('.sqlite3', '')
        data = Workload.aggregate(paths[i], 'dynamic_power')
        [component_count, step_count] = data.shape
        sys.stdout.write(
            '%-20s %10d %10.2f' %
            (name, component_count, constant.TIME_STEP * step_count))
        total = np.sum(data)
        # Treat the last quarter of the components (at least one) as cache,
        # the rest as cores
        cache_count = int(max(1, np.floor(component_count / 4)))
        core_index = range(component_count - cache_count)
        for j in range(component_count):
            local = np.sum(data[j, :])
            sys.stdout.write(' %10.2e (%5.2f%%)' %
                             (local, 100 * local / total))
            if j in core_index:
                cores[i] += local
            else:
                cache[i] += local
        print()
    scale = cores + cache
    cores = 100 * cores / scale
    cache = 100 * cache / scale
    print('Average cache contribution: %.2f%%' % np.mean(cache))
    if not draw:
        return
    index = list(range(count))
    pp.figure(figsize=(14, 6), facecolor='w', edgecolor='k')
    pp.bar(index, cores, color='b')
    pp.bar(index, cache, color='y', bottom=cores)
    pp.ylim([0, 101])
    pp.show()
Example #15
    def dummyGen():

        ioList1 = IOList([IO(10, 5), IO(13, 7)])
        ioList2 = IOList([])

        # high priority IO intensive task
        t1 = Task("Task 1", 0, 3, 20, ioList1)

        # low priority CPU intensive task
        t2 = Task("Task 2", 10, 0, 15, ioList2)

        w = Workload([t1, t2])

        return w
Example #16
      def _queryTrObjsByCatView(wlNames=None, areaIds=None, areaNames=None,
                                branchNames=None, testerIds=None, bldTypes=None):
         def _argsWrapper(args):
            if not args:
               return [None]
            else:
               return args

         if not wlNames:
            wlIds = [None]
         else:
            from workload import Workload
            wlIds = Workload.getWorkloadIds(wlNames)

         if areaNames:
            areaIds = Area.getAreaIds(areaNames)
         elif areaIds:
            childrenIds = Area.getChildrenAreaIds(areaIds)
            if childrenIds:
               areaIds.extend(childrenIds)
         else:
            areaIds = [None]

         # The order of filterKey entries must match the factor order in filterLoop
         filterKey = ['workload', 'area__id__in', 'deliverables__build__branch__in',
                      'tester__id__in', 'deliverables__build__bldtype__in']
         filterLoop = itertools.product(wlIds, areaIds, _argsWrapper(branchNames),
                                        _argsWrapper(testerIds),
                                        _argsWrapper(bldTypes))
         filterLoop = list(filterLoop)

         trObjs = []
         for item in filterLoop:
            filterMap = {}
            for i in range(len(item)):
               if item[i] is None:
                  continue
   
               filterMap[filterKey[i]] = item[i]
   
            logger.info("Retrieving testrun data via RestAPI with filterInfo: %s",
                        filterMap)
            tmpTrObjs = queryCatInfo('testrun', filterMap, limitDay=limitDay,
                                     limit=limitNumber, orderBy="-endtime")
            if tmpTrObjs:
               logger.info("Totally get %d testruns" % len(tmpTrObjs))
               trObjs.extend(tmpTrObjs)

         return trObjs
Example #17
 def create_workload(self,
                     base_name,
                     image="busybox",
                     network="bridge",
                     ip=None,
                     labels=None,
                     namespace=None):
     """
     Create a workload container inside this host container.
     """
     # Use None as the default to avoid sharing one mutable list across calls
     labels = labels if labels is not None else []
     # Append a random six-letter suffix so each workload name is unique
     name = base_name + "_" + \
         ''.join([random.choice(string.ascii_letters) for ii in range(6)]).lower()
     workload = Workload(self,
                         name,
                         image=image,
                         network=network,
                         ip=ip,
                         labels=labels,
                         namespace=namespace)
     self.workloads.add(workload)
     return workload
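Typical usage of these create_workload variants, where host stands in for an instance of the containing host class:

# Hypothetical call: a uniquely suffixed busybox workload on an existing host
workload = host.create_workload("web", image="busybox", network="bridge")
print(workload.name)  # e.g. "web_qzkpma"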
Example #18
def test_add_workload():
    WL_data = aixprt.get_workloads()
    workload1 = Workload("test_workload1", "test comment", 'https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1', 4100, 0.02, 10, 20, 10, 100, -2, 100, True, 0, 0, 0, "")
    
    # Test to ensure that the workload to be added does not exist in the file
    assert not "test_workload1" in WL_data
    # Then add and assert that it does exist in it
    aixprt.add_workload("test_workload1", "test comment", 'https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1', "4100", "0.02", "10", "20", "10", "100", "-2", "100", "True", "0", "0", "0", "")
    WL_data = aixprt.get_workloads()
    assert "test_workload1" in WL_data
    assert compare_workloads(WL_data["test_workload1"], workload1)

    # Try to add the same workload again and ensure it is not added twice
    assert len(WL_data) == 3
    assert aixprt.add_workload("test_workload1", "test comment", 'https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1', "4100", "0.02", "10", "20", "10", "100", "-2", "100", "True", "0", "0", "0", "") == 1
    WL_data = aixprt.get_workloads()
    assert len(WL_data) == 3

    # Try to add invalid workloads and ensure they are not added
    # Workload with one invalid parameter
    invalid_1 = aixprt.add_workload("wrong_workload", "test comment", 'https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1', "4100", "0.02", "1.0", "20", "10", "100", "-2", "100", "True", "0", "0", "0", "")
    expected_invalid_1 = ["testing_percentage"]
    WL_data = aixprt.get_workloads()
    assert len(WL_data) == 3
    assert len(invalid_1) == 1
    assert invalid_1 == expected_invalid_1
    # Workload with everything wrong (except for the comment and command parameters, which can both be empty strings)
    invalid_2 = aixprt.add_workload("", "", '', "cat", "0..02", "1.0", "False", "1.0", "asdga100", "--2", "1%00", "FTrue", "z", "e", "ro", "")
    expected_invalid_2 = ["name", "tfhub_model", "training_steps", "learning_rate", "testing_percentage", "validation_percentage", "eval_step_interval", "train_batch_size", "test_batch_size", "validation_batch_size", "flip_left_right", "random_crop", "random_scale", "random_brightness"]
    WL_data = aixprt.get_workloads()
    assert len(WL_data) == 3
    assert len(invalid_2) == 14 #TODO fix after regex is corrected
    assert invalid_2 == expected_invalid_2

    # Add a workload with a command
    WL_data = aixprt.get_workloads()
    assert len(WL_data) == 3
    assert aixprt.add_workload("test_workload2", "", '', "", "", "", "", "", "", "", "", "", "", "", "", "help dir") == 0
    WL_data = aixprt.get_workloads()
    assert len(WL_data) == 4    
Example #19
File: run.py  Project: maniaabdi/wlmod

import sys

import simpy
import yaml

from workload import Workload


def parse_config(fpath):
    with open(fpath, 'r') as f:
        conf = yaml.load(f, Loader=yaml.FullLoader)['workload']
    if conf['interarrival'].lower().endswith('s'):
        conf['interarrival'] = int(conf['interarrival'].lower().rsplit('s',
                                                                       1)[0])

    if conf['duration'].lower().endswith('s'):
        conf['duration'] = int(conf['duration'].lower().rsplit('s', 1)[0])
    elif conf['duration'].lower().endswith('m'):
        conf['duration'] = int(conf['duration'].lower().rsplit('m', 1)[0]) * 60
    elif conf['duration'].lower().endswith('h'):
        conf['duration'] = int(conf['duration'].lower().rsplit('h',
                                                               1)[0]) * 3600

    if conf['cache']['size'].lower().endswith('g'):
        conf['cache']['size'] = int(conf['cache']['size'].lower().rsplit(
            'g', 1)[0]) * (1 << 30)

    return conf


if __name__ == '__main__':
    conf_file = sys.argv[1] if len(sys.argv) > 1 else 'config.yaml'
    conf = parse_config(conf_file)
    env = simpy.Environment()
    workload = Workload(conf, type='simulation', env=env)
    env.run(until=conf['duration'])
    workload.dump_stats()
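parse_config expects a workload section whose durations carry unit suffixes; an illustrative config.yaml (the values are made up):

workload:
  interarrival: 5s   # parsed to the integer 5 (seconds)
  duration: 2m       # converted to 120 seconds
  cache:
    size: 4g         # converted to 4 << 30 bytes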
Example #20
def add_workload(name,
                 comment,
                 tfhub_model,
                 training_steps,
                 learning_rate,
                 testing_percentage,
                 validation_percentage,
                 eval_step_interval,
                 train_batch_size,
                 test_batch_size,
                 validation_batch_size,
                 flip_left_right,
                 random_crop,
                 random_scale,
                 random_brightness,
                 command=None):
    """
    Adds a new workload entry to the workloads.json file

    :param name: Name of the new workload\n
    :param comment: Optional workload comment/description of the workload\n
    :param tfhub_model: URL pertaining to the machine learning model that is to be used from Tensorflow's github\n
    :param training_steps: The number of training steps that are run before ending\n
    :param learning_rate: Numerical rate pertaining to the rate of learning when training\n
    :param testing_percentage: Percentage of images to use as a test set\n
    :param validation_percentage: Percentage of images to use as a validation set\n
    :param eval_step_interval: Number of steps to evaluate the training results after\n
    :param train_batch_size: Number of images to train on at a time\n
    :param test_batch_size: Number of images to test on. "-1" causes the entire test set to be used\n
    :param validation_batch_size: Number of images to use in the evaluation batch. "-1" causes the entire validation set to be used\n
    :param flip_left_right: True or False, whether to randomly flip half of the training images horizontally\n
    :param random_crop: Percentage determining how much of a margin to randomly crop off the training images\n
    :param random_scale: Percentage determining how much to randomly scale the size of the training images by\n
    :param random_brightness: Percentage determining how much to randomly multiply the training image input pixels up or down by\n
    :param command: Optional console command to be run upon running this workload\n
    :return: A list of invalid parameters if an error has occurred. A 0 is returned on success\n
    """
    # Creating a new workload to validate parameters
    new_WL = Workload(name, comment, tfhub_model, training_steps,
                      learning_rate, testing_percentage, validation_percentage,
                      eval_step_interval, train_batch_size, test_batch_size,
                      validation_batch_size, flip_left_right, random_crop,
                      random_scale, random_brightness, command)

    invalid_params = ""
    invalid_params = new_WL.validate_parameters()

    # If parameters are valid, then try to open workloads.json
    if len(invalid_params) == 0:
        json_path = Path("workloads.json")
        if json_path.is_file():
            data = load_workloads()
            for x in data['workloads']:
                # Check if a workload with the same name already exists; if so, don't add this new one
                if x['name'] == name:
                    print(f"Workload {name} already exists.")
                    return 1
        # Otherwise start a fresh workloads structure
        else:
            data = {}
            data['workloads'] = []
        # The workload is acceptable, so add it to the workloads dictionary
        data['workloads'].append({
            'name': new_WL.name,
            'comment': new_WL.comment,
            'tfhub model': new_WL.tfhub_model,
            'training steps': new_WL.training_steps,
            'learning rate': new_WL.learning_rate,
            'testing percentage': new_WL.testing_percentage,
            'validation percentage': new_WL.validation_percentage,
            'eval step interval': new_WL.eval_step_interval,
            'train batch size': new_WL.train_batch_size,
            'test batch size': new_WL.test_batch_size,
            'validation batch size': new_WL.validation_batch_size,
            'flip left/right': new_WL.flip_left_right,
            'random crop': new_WL.random_crop,
            'random scale': new_WL.random_scale,
            'random brightness': new_WL.random_brightness,
            'command': new_WL.command
        })
        # Put updated dictionary into workloads.json
        with open('workloads.json', 'w') as json_file:
            json.dump(data, json_file, indent=4)
        # Success
        return 0
    else:
        return invalid_params
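A quick usage sketch mirroring the tests above (note that every numeric parameter is passed as a string):

result = add_workload("demo_workload", "smoke test",
                      "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1",
                      "500", "0.01", "10", "10", "10", "100", "-1", "100",
                      "False", "0", "0", "0", "")
if result == 0:
    print("workload added")
else:
    print("invalid parameters:", result)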
Example #21
def edit_workload(original_name,
                  new_name,
                  comment,
                  tfhub_model,
                  training_steps,
                  learning_rate,
                  testing_percentage,
                  validation_percentage,
                  eval_step_interval,
                  train_batch_size,
                  test_batch_size,
                  validation_batch_size,
                  flip_left_right,
                  random_crop,
                  random_scale,
                  random_brightness,
                  command=None):
    """
    Edits an existing workload in the workloads.json file

    :param original_name: Name of the workload to be edited\n
    :param new_name: Name of the new workload\n
    :param comment: Optional workload comment/description of the workload\n
    :param tfhub_model: URL pertaining to the machine learning model that is to be used from Tensorflow's github\n
    :param training_steps: The number of training steps that are run before ending\n
    :param learning_rate: Numerical rate pertaining to the rate of learning when training\n
    :param testing_percentage: Percentage of images to use as a test set\n
    :param validation_percentage: Percentage of images to use as a validation set\n
    :param eval_step_interval: Number of steps to evaluate the training results after\n
    :param train_batch_size: Number of images to train on at a time\n
    :param test_batch_size: Number of images to test on. "-1" causes the entire test set to be used\n
    :param validation_batch_size: Number of images to use in the evaluation batch. "-1" causes the entire validation set to be used\n
    :param flip_left_right: True or False, whether to randomly flip half of the training images horizontally\n
    :param random_crop: Percentage determining how much of a margin to randomly crop off the training images\n
    :param random_scale: Percentage determining how much to randomly scale the size of the training images by\n
    :param random_brightness: Percentage determining how much to randomly multiply the training image input pixels up or down by\n
    :param command: Optional console command to be run upon running this workload\n
    :return: A list of invalid parameters if an error has occurred. A 0 is returned on success\n
    """
    # Creating an updated workload to validate parameters
    new_WL = Workload(new_name, comment, tfhub_model, training_steps,
                      learning_rate, testing_percentage, validation_percentage,
                      eval_step_interval, train_batch_size, test_batch_size,
                      validation_batch_size, flip_left_right, random_crop,
                      random_scale, random_brightness, command)

    invalid_params = ""
    invalid_params = new_WL.validate_parameters()

    # If parameters are valid, then try to open workloads.json
    if len(invalid_params) == 0:
        json_path = Path("workloads.json")
        # Check if the workloads file exists in order to edit it
        if not json_path.is_file():
            print("There are currently no workloads to edit.")
            return
        # If it exists, then load the workloads from it
        data = load_workloads()
        # Edit the parameters of the workload corresponding to the original name
        for x in data['workloads']:
            if x['name'] == original_name:
                x['name'] = new_WL.name
                x['comment'] = new_WL.comment
                x['tfhub model'] = new_WL.tfhub_model
                x['training steps'] = new_WL.training_steps
                x['learning rate'] = new_WL.learning_rate
                x['testing percentage'] = new_WL.testing_percentage
                x['validation percentage'] = new_WL.validation_percentage
                x['eval step interval'] = new_WL.eval_step_interval
                x['train batch size'] = new_WL.train_batch_size
                x['test batch size'] = new_WL.test_batch_size
                x['validation batch size'] = new_WL.validation_batch_size
                x['flip left/right'] = new_WL.flip_left_right
                x['random crop'] = new_WL.random_crop
                x['random scale'] = new_WL.random_scale
                x['random brightness'] = new_WL.random_brightness
                x['command'] = new_WL.command
        # Put updated dictionary into workloads.json
        with open('workloads.json', 'w') as json_file:
            json.dump(data, json_file, indent=4)

        return 0
    else:
        return invalid_params
Example #22
import datetime
from workload import Workload

date = datetime.date(2018, 6, 1)

workload = Workload()
issues = workload.getWorkloadPerIssue(date)

totalWorkload = 0
print "| Issue | Workload |"
for issue in issues:
    totalWorkload += issues[issue]
    print "|[" + issue.key + "|" + workload.account + issue.key + "] " + issue.fields.summary + "| " + str(
        float(issues[issue]) / 3600) + "h|"

print "| Total |*" + str(float(totalWorkload) / 3600) + "h*|"
Example #23
from mongo import Mongo
from pymongo import *
from query import Query
import json
from bson import json_util
from datetime import datetime
from workload import Workload

if __name__ == "__main__":

    # Connect to Mongo
    mongo = Mongo()
    db = mongo.getDb()

    pipeline = Workload().getWorkload()

    f = open("demofile.md", "w")

    lst = db.list_collection_names()
    lst.sort()
    f.write('# Execution stats for each query')
    f.write('\n')
    f.write('\n')
    for i in lst:
        f.write('- [' + i + '](#' + i + ')')
        f.write('\n')

    f.write('\n')

    for i in lst:
        f.write('## ')
Example #24
def main():
    # NOTE: set the invoke count for each chain below the chain definitions (the concurrencies variable)
    # aws
    lambda1 = LambdaFunction(url="https://lambda1.com", id=1)
    lambda2 = LambdaFunction(url="https://lambda2.com", id=2)
    lambda3 = LambdaFunction(url="https://lambda3.com", id=3)
    lambda4 = LambdaFunction(url="https://lambda4.com", id=4)
    ## Chain 0 ##
    node1_c0 = ChainNode(function=lambda1, nodeID=1, children=[], lastNodeIDs=[1], chainFunctionIDs=[1], args={})
    chain0 = node1_c0
    ## Chain 1 ##
    #    L2
    #   /
    # L1
    #   \
    #    L3
    node3_c1 = ChainNode(function=lambda3, nodeID=3, children=[], lastNodeIDs=[2,3], chainFunctionIDs=[1,2,3], args={})
    node2_c1 = ChainNode(function=lambda2, nodeID=2, children=[], lastNodeIDs=[2,3], chainFunctionIDs=[1,2,3], args={})
    node1_c1 = ChainNode(function=lambda1, nodeID=1, children=[node2_c1, node3_c1], lastNodeIDs=[2,3], chainFunctionIDs=[1,2,3], args={})
    chain1 = node1_c1
    ## Chain 2 ##
    # L3-L2-L1
    node3_c2 = ChainNode(function=lambda1, nodeID=3, children=[], lastNodeIDs=[3], chainFunctionIDs=[1,2,3], args={})
    node2_c2 = ChainNode(function=lambda2, nodeID=2, children=[node3_c2], lastNodeIDs=[3], chainFunctionIDs=[1,2,3], args={})
    node1_c2 = ChainNode(function=lambda3, nodeID=1, children=[node2_c2], lastNodeIDs=[3], chainFunctionIDs=[1,2,3], args={})
    chain2 = node1_c2
    ## Chain 3 ##
    #    L2
    #   /
    # L1
    #   \
    #    L3-L4-L4-L4
    node6_c3 = ChainNode(function=lambda4, nodeID=6, children=[], lastNodeIDs=[6,2], chainFunctionIDs=[1,2,3,4,4,4], args={})
    node5_c3 = ChainNode(function=lambda4, nodeID=5, children=[node6_c3], lastNodeIDs=[6,2], chainFunctionIDs=[1,2,3,4,4,4], args={})
    node4_c3 = ChainNode(function=lambda4, nodeID=4, children=[node5_c3], lastNodeIDs=[6,2], chainFunctionIDs=[1,2,3,4,4,4], args={})
    node3_c3 = ChainNode(function=lambda3, nodeID=3, children=[node4_c3], lastNodeIDs=[6,2], chainFunctionIDs=[1,2,3,4,4,4], args={})
    node2_c3 = ChainNode(function=lambda2, nodeID=2, children=[], lastNodeIDs=[6,2], chainFunctionIDs=[1,2,3,4,4,4], args={})
    node1_c3 = ChainNode(function=lambda1, nodeID=1, children=[node2_c3, node3_c3], lastNodeIDs=[6,2], chainFunctionIDs=[1,2,3,4,4,4], args={})
    chain3 = node1_c3
    ## Chain 4 ##
    #    L2
    #   /
    # L1-L3
    #   \
    #    L4
    node4_c4 = ChainNode(function=lambda4, nodeID=4, children=[], lastNodeIDs=[2,3,4], chainFunctionIDs=[1,2,3,4], args={})
    node3_c4 = ChainNode(function=lambda3, nodeID=3, children=[], lastNodeIDs=[2,3,4], chainFunctionIDs=[1,2,3,4], args={})
    node2_c4 = ChainNode(function=lambda2, nodeID=2, children=[], lastNodeIDs=[2,3,4], chainFunctionIDs=[1,2,3,4], args={})
    node1_c4 = ChainNode(function=lambda1, nodeID=1, children=[node2_c4, node3_c4, node4_c4], lastNodeIDs=[2,3,4], chainFunctionIDs=[1,2,3,4], args={})
    chain4 = node1_c4
    ## Chain 5 ##
    #    L2
    #   /
    # L1
    #   \
    #    L3-L3
    node4_c5 = ChainNode(function=lambda3, nodeID=4, children=[], lastNodeIDs=[4,2], chainFunctionIDs=[1,2,3,3], args={})
    node3_c5 = ChainNode(function=lambda3, nodeID=3, children=[node4_c5], lastNodeIDs=[4,2], chainFunctionIDs=[1,2,3,3], args={})
    node2_c5 = ChainNode(function=lambda2, nodeID=2, children=[], lastNodeIDs=[4,2], chainFunctionIDs=[1,2,3,3], args={})
    node1_c5 = ChainNode(function=lambda1, nodeID=1, children=[node2_c5, node3_c5], lastNodeIDs=[4,2], chainFunctionIDs=[1,2,3,3], args={})
    chain5 = node1_c5

    ######################################################################################################################
    # NOTE: real applications
    ## ObjectDetection using openCV 
    #    L2
    #   /
    # L1
    #   \
    #    L3
    objectDetection1 = LambdaFunction(url="image-preprocessing.com", id=1)
    objectDetection2 = LambdaFunction(url="opencv_maskrcnn.com", id=2)
    objectDetection3 = LambdaFunction(url="opencv_yolo.com", id=3)
    node3_c6 = ChainNode(function=objectDetection3, nodeID=3, children=[], lastNodeIDs=[2,3], chainFunctionIDs=[1,2,3], args={})
    node2_c6 = ChainNode(function=objectDetection2, nodeID=2, children=[], lastNodeIDs=[2,3], chainFunctionIDs=[1,2,3], args={})
    node1_c6 = ChainNode(function=objectDetection1, nodeID=1, children=[node2_c6, node3_c6], lastNodeIDs=[2,3], chainFunctionIDs=[1,2,3], args={})
    chain6 = node1_c6

    concurrencies = [
        {"chain": chain0, "count": 0},
        {"chain": chain1, "count": 1},
        {"chain": chain2, "count": 1},
        {"chain": chain3, "count": 0},
        {"chain": chain4, "count": 0},
        {"chain": chain5, "count": 1},
        {"chain": chain6, "count": 0},
    ]

    chain_fan2 = Workload("Fan2", 5, 20, chain1, 1, 1000)
    chain_linear = Workload("Linear", 5, 20, chain2, 2, 1000)
    chain_fan_linear = Workload("FanAndLinear", 5, 20, chain5, 5, 1000)

    # Set up Kafka (startWorkload below needs both producers)
    kafka_url = "localhost:9092"
    kafka_pq_topic = "pending_queue"
    producer_PQ = connect_kafka_producer(kafka_url)
    # Producer Side Logging
    kafka_psq_topic = "producer_side_logging_queue"
    producer_PSQ = connect_kafka_producer(kafka_url)

    # Functions are invoked here
    chain_fan2.startWorkload(producer_PQ, producer_PSQ)
    chain_linear.startWorkload(producer_PQ, producer_PSQ)
    chain_fan_linear.startWorkload(producer_PQ, producer_PSQ)

    producer_PQ.close()
    producer_PSQ.close()
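connect_kafka_producer is not shown; a minimal sketch with kafka-python, where the serializer choice is an assumption:

from kafka import KafkaProducer

def connect_kafka_producer(kafka_url):
    # Hypothetical helper: a producer that sends UTF-8 encoded string payloads
    return KafkaProducer(bootstrap_servers=[kafka_url],
                         value_serializer=lambda v: v.encode("utf-8"))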
Example #25
import time

def main(file_path):
    workload = Workload(file_path)
    # Workload.next() is assumed to yield (timestamp, entry) pairs; the loop
    # variable is renamed so it no longer shadows `workload`
    for t, entry in workload.next():
        time.sleep(1)  # sleep for 1 sec
        periodic_curl(entry)
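periodic_curl is not defined in the snippet; a plausible stand-in using requests, assuming each replayed entry carries a target URL:

import requests

def periodic_curl(entry):
    # Hypothetical: issue one HTTP GET per replayed workload entry
    resp = requests.get(entry.url, timeout=5)
    print(resp.status_code)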
Example #26
from mongo import Mongo
from workload import Workload
from query import Query
import pandas as pd
from table import Table

if __name__ == "__main__":
    mongo = Mongo()
    db = mongo.getDb()
    list_of_collections = db.list_collection_names()
    workload = Workload()
    pipeline = workload.getWorkload()

    data = []
    for i in pipeline:
        dct = {}
        for j in list_of_collections:
            q = Query(db, j, i)
            dct[j] = q.getQueryExecTime()
        data.append(dct)
    df = pd.DataFrame(data)

    df_total = pd.DataFrame([dict(df.sum())])

    list_of_queries = []
    for i in range(len(pipeline)):
        list_of_queries.append('Query ' + str(i + 1))

    df['Query'] = list_of_queries
    df.set_index('Query', inplace=True)
Example #27
def test_compare_workloads():
    workloadA_1 = Workload("Small_Workload", "", "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1", 200, 0.01, 10, 10, 10, 100, -1, 100, False, 0, 0, 0, "")
    workloadA_2 = Workload("Small_Workload", "", "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1", 200, 0.01, 10, 10, 10, 100, -1, 100, False, 0, 0, 0, "")
    workloadB = Workload("test_workload(I_v3)", "comment", "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1", 4000, 0.02, 30, 10, 110, 100, -3, 100, True, 0, 0, 0, "")
    assert compare_workloads(workloadA_1, workloadA_2)
    assert not compare_workloads(workloadA_1, workloadB)
Example #28
def getAllStages(execStats):
    # Recursively collect every 'stage' value from a nested explain() document
    s = set()
    if isinstance(execStats, dict):
        for i in execStats:
            if i == 'stage':
                s.add(execStats[i])
            else:
                s.update(getAllStages(execStats[i]))
    elif isinstance(execStats, list):
        for i in execStats:
            s.update(getAllStages(i))
    return s


if __name__ == "__main__":
    p = Workload()
    mongo = Mongo()
    db = mongo.getDb()
    list_of_collections = db.list_collection_names()
    list_of_collections.sort()
    pipeline = p.getWorkload()
    f = open("list_operations.md", "w")
    for i in list_of_collections:
        f.write('## ')
        f.write(i)
        f.write('\n')
        f.write('\n')
        f.write('| Query | Operations |')
        f.write('\n')
        f.write('|---|---|')
        f.write('\n')
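A quick check of getAllStages against a hand-written explain-style document:

sample = {
    'stage': 'FETCH',
    'inputStage': {'stage': 'IXSCAN', 'indexBounds': {}},
    'shards': [{'stage': 'SHARD_MERGE'}],
}
print(sorted(getAllStages(sample)))  # ['FETCH', 'IXSCAN', 'SHARD_MERGE']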