Example #1
def main():
    Configuration.para = True

    model = load_file("testcase/vl2")
    model.sfc_list = model.sfc_list[:200]

    results = {}

    k_list = [64, 128, 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 4096]
    k_list = [16]  # the full sweep above is overridden; only k = 16 is run
    config.K_MIN = 16

    for k in k_list:
        result = {}
        config.K = k
        print('k =', k)
        model.clear()

        tic()
        result['optimal'] = linear_programming(model)
        result['RORP'] = ROR(model)
        result['RORP time'] = toc()
        model.clear()
        result['greedy'] = PARC(model)
        print_dict_result(result, model)
        results[k] = result

    save_obj(results, "results/k/total_{}".format(current_time()))
Example #2
    def test_tictoc(self):
        """Test simple cases"""
        tic()
        delta_gt = 0.5
        time.sleep(delta_gt)
        elapsed = toc()
        diff = abs(delta_gt - elapsed)
        assert diff < 0.01, ("Diff is too big. Expected diff: 0.01"
                             f", obtained diff {diff}")
Example #3
    def test_buffer(self):
        """Test number of elements in the buffer"""
        num_iters = 20
        for _ in range(num_iters):
            tic()

        num_tics = len(
            _TICTOC_HELPER_CLASS_5da0381c_27af_4d67_8881._timers_start)

        assert num_tics == num_iters, "Invalid number of tics in buffer"
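
The tests in Examples #2, #3, and #7 assume that tic() pushes a start timestamp onto an internal buffer and that toc() pops the most recent one and returns the elapsed seconds, so calls may nest. The sketch below is a minimal illustration of such a helper under that assumption; the class name and module layout are not taken from the examples.

import time


class _TicTocHelper:
    """Keeps a stack of start times so tic()/toc() calls can nest."""

    def __init__(self):
        self._timers_start = []

    def tic(self):
        # Push the current time onto the buffer.
        self._timers_start.append(time.perf_counter())

    def toc(self):
        # Pop the most recent start time and return the elapsed seconds.
        start = self._timers_start.pop()
        return time.perf_counter() - start


_helper = _TicTocHelper()
tic = _helper.tic
toc = _helper.toc

With a singleton helper like this, each tic() grows _timers_start by one entry (as counted in Example #3), and the nesting in Example #7 works because each toc() pairs with the most recent unmatched tic().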
Example #4
def symmetry(image: np.ndarray, order: float = 2) -> np.ndarray:
    # Dtype-dependent maximum value (computed but not used further below).
    if issubclass(image.dtype.type, np.integer):
        max_value = np.iinfo(image.dtype).max
    else:
        max_value = np.finfo(image.dtype).max
    tic()
    # using a fake image for now
    whl, rgb2 = fake_create_sym(image, order)
    # whl, rgb2 = create_sym(image, order)
    print(toc())
    # clrwhl, rgb = create_sym(img, order)
    # print(img.shape)
    return rgb2
Example #5
def main():
    Configuration.para = True
    config.GC_BFS = False
    config.K = 1024

    models = []

    vnf_set = generate_vnf_set(size=30)
    for i in [10]:
        topo = topology.vl2_topo(port_num_of_aggregation_switch=i,
                                 port_num_of_tor_for_server=i)
        model = Model(
            topo,
            generate_sfc_list2(topo=topo,
                               vnf_set=vnf_set,
                               size=600,
                               base_idx=0))
        model.sfc_list = model.sfc_list[:2 * len(model.servers())]
        models.append(model)

    greedy_results = {}

    for model in models:
        result = {}
        model.clear()
        tic()
        result['greedy'] = PARC(model)
        result['greedy time'] = toc()

        print_dict_result(result, model)
        greedy_results[len(model.servers())] = result

    save_obj(greedy_results, "./results/time/greedy_{}".format(current_time()))

    rorp_results = {}
    for model in models:
        result = {}
        model.clear()
        tic()
        result['optimal'] = linear_programming(model)
        result['RORP'] = ROR(model)
        result['RORP time'] = toc()

        print_dict_result(result, model)
        rorp_results[len(model.servers())] = result

    save_obj(rorp_results, "./results/time/rorp_{}".format(current_time()))
Example #6
def main(unused_argv):
    # Using the Winograd non-fused algorithms provides a small performance boost.
    os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'

    pred_hooks = None
    if FLAGS.debug:
        debug_hook = tf_debug.LocalCLIDebugHook()
        pred_hooks = [debug_hook]

    model = tf.estimator.Estimator(
        model_fn=deeplab_model.deeplabv3_plus_model_fn,
        model_dir=FLAGS.model_dir,
        params={
            'output_stride': FLAGS.output_stride,
            'batch_size': 1,  # batch size must be 1 because the image sizes may differ
            'base_architecture': FLAGS.base_architecture,
            'pre_trained_model': None,
            'batch_norm_decay': None,
            'num_classes': _NUM_CLASSES,
        })

    examples = dataset_util.read_examples_list(FLAGS.infer_data_list)
    image_files = [
        os.path.join(FLAGS.data_dir, filename) for filename in examples
    ]
    predictions = model.predict(
        input_fn=lambda: preprocessing.eval_input_fn(image_files),
        hooks=pred_hooks)

    output_dir = FLAGS.output_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    for pred_dict, image_path in zip(predictions, image_files):

        tic()
        image_basename = os.path.splitext(os.path.basename(image_path))[0]
        output_filename = image_basename + '_mask.png'
        path_to_output = os.path.join(output_dir, output_filename)
        print("generating:", path_to_output)
        mask = pred_dict['decoded_labels']
        mask = Image.fromarray(mask)
        plt.axis('off')
        plt.imshow(mask)
        plt.savefig(path_to_output, bbox_inches='tight')
        print("Time elapsed (sec)", toc())
Example #7
    def test_buffer_values(self):
        """Test values collected in nesting"""
        num_iters = 10
        delta_gt = 0.5
        values = []
        tic()
        time.sleep(delta_gt)
        elapsed = toc()
        values.append(elapsed)
        for _ in range(num_iters):
            tic()
            time.sleep(delta_gt)
            elapsed = toc()
            values.append(elapsed)

        delta = mean(values)
        diff = abs(delta_gt - delta)

        assert diff < 0.01, ("Diff is too big. Expected diff: 0.01"
                             ", obtained diff {}".format(diff))
Example #8
                save_encodings_dict(face_encoding_dictionary)
        return vertex_id

    if num_faces == 1 and face_location is None:
        face_enc = tuple(FR.face_encodings(img)[0])
        v_id = recognize_core(face_enc)
        return v_id
    elif face_location is None:
        img2 = Image.fromarray(img.astype('uint8'))
        rawBytes = io.BytesIO()
        img2.save(rawBytes, "JPEG")
        rawBytes.seek(0)
        img_base64 = base64.b64encode(rawBytes.read())
        return img_base64, FR.face_locations(img)
    else:
        locations = FR.face_locations(img)
        encodings = FR.face_encodings(img)
        for location, encoding in zip(locations, encodings):
            if location == tuple(face_location):
                v_id = recognize_core(tuple(encoding))
                return v_id


if __name__ == '__main__':
    from Tree.Utils.ImageReducer import reduce

    reduce('../people/faces/pending/Picture.JPG')
    tic()
    print(number_of_faces('../people/faces/pending/Picture.JPG'))
    print(toc())
Example #9
def password_cracker(type):
    """Count the 6-digit numbers in the range whose digits never decrease and
    that contain at least one pair of equal adjacent digits not part of a
    larger group. `type` selects the string-based ('str') or numeric ('num')
    digit comparison; the elapsed time is returned."""
    tic()
    condition_1 = []
    condition_2 = []
    p_range = range(272092, 815432)

    def Cond_1():

        for i in p_range:
            dx = [str(i)[x] for x in range(0, 6)]
            # print(dx)
            # An earlier version used a single long hard-coded conditional here;
            # it worked, but it was long and seemed quite clunky.

            # The block below replaces it. It is still hard-coded in a way,
            # but it works and looks a bit nicer.
            if ((dx[0] == dx[1]) and (dx[1] != dx[2])):
                condition_1.append(i)
            for ix in range(1, 4):
                if (dx[ix - 1] != dx[ix]) and (dx[ix] == dx[ix + 1]) and (
                        dx[ix + 1] != dx[ix + 2]):
                    condition_1.append(i)
            if ((dx[3] != dx[4]) and (dx[4] == dx[5])):
                condition_1.append(i)

        return condition_1

    def Cond_2():
        for i in p_range:
            dx = [str(i)[x] for x in range(0, 6)]
            if dx[0] <= dx[1] <= dx[2] <= dx[3] <= dx[4] <= dx[5]:
                condition_2.append(i)
        return condition_2

##    for i in range(101,121):
##        for x in range(0,3):
##            d_x = str(i)[x]
##            print(dx)

    def numcon_1():
        for i in p_range:
            dx = [i // 10**n % 10 for n in range(0, 6)]
            if ((dx[0] == dx[1]) and (dx[1] != dx[2])):
                condition_1.append(i)
            for ix in range(1, 4):
                if (dx[ix - 1] != dx[ix]) and (dx[ix] == dx[ix + 1]) and (
                        dx[ix + 1] != dx[ix + 2]):
                    condition_1.append(i)
            if ((dx[3] != dx[4]) and (dx[4] == dx[5])):
                condition_1.append(i)
        return condition_1

    def numcon_2():
        for i in p_range:
            dx = [i // 10**n % 10 for n in range(0, 6)]
            ##            for y in range(0,5):
            ##                if dx[y] > dx[y+1]:
            ##                    continue
            ##                else:
            ##                    condition_2.append(i)
            # Minor difference: the numeric method yields the digits in reverse
            # order as n goes from 0 to 5, so the inequalities are reversed.
            if dx[0] >= dx[1] >= dx[2] >= dx[3] >= dx[4] >= dx[5]:
                condition_2.append(i)
        return condition_2

    # Either numcon_X or Cond_X can be used: one compares the digits as
    # strings, the other numerically.
    if type == 'num':
        both_conditions = set(numcon_1()) & set(numcon_2())
    elif type == 'str':
        both_conditions = set(Cond_1()) & set(Cond_2())
    else:
        raise ValueError("type must be 'num' or 'str'")
    print(len(both_conditions))
    #print(both_conditions)
    elapsed = toc()
    print(elapsed)
    return elapsed
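
The pair check that Cond_1 and numcon_1 hard-code (an adjacent pair of equal digits that is not part of a longer run) can also be expressed with itertools.groupby, which collapses runs of equal digits. The sketch below is an illustrative alternative, not part of the original example; the helper names are made up, and the result can be timed with tic()/toc() in the same way.

from itertools import groupby


def has_isolated_pair(digits):
    # True if some digit repeats exactly twice in a row (a run of length 2).
    return any(len(list(group)) == 2 for _, group in groupby(digits))


def is_nondecreasing(digits):
    # True if the digits never decrease from left to right.
    return all(a <= b for a, b in zip(digits, digits[1:]))


def count_candidates(lo=272092, hi=815432):
    # Count the numbers in [lo, hi) that satisfy both conditions.
    return sum(1 for i in range(lo, hi)
               if has_isolated_pair(str(i)) and is_nondecreasing(str(i)))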