Code Example #1
File: empirical.py  Project: HEATlab/DCforPSTN
def scheduleIsValid(network: STN, schedule: dict) -> bool:
    ## Check that the schedule is actually defined on all relevant vertices
    # This number is arbitrary - any sufficiently small, positive constant works
    epsilon = 0.001
    vertices = network.getAllVerts()
    for vertex in vertices:
        vertexID = vertex.nodeID
        assert vertexID in schedule

    # Check that the schedule is valid
    edges = network.getAllEdges()
    for edge in edges:
        # Loop through the constraints
        start = edge.i
        fin = edge.j
        uBound = edge.Cij
        lBound = -edge.Cji

        boundedAbove = (schedule[fin] - schedule[start]) <= uBound + epsilon
        boundedBelow = (schedule[fin] - schedule[start]) >= lBound - epsilon

        # Check if constraint is not satisfied
        if ((not boundedAbove) or (not boundedBelow)):
            return False

    return True
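A minimal usage sketch for the checker above (the import locations and the JSON file name are assumptions; only calls that appear in the examples on this page are used):

# Hypothetical import locations; the real module layout may differ.
# from util import loadSTNfromJSONfile
# from empirical import scheduleIsValid

network = loadSTNfromJSONfile('example.json')               # illustrative path
# Schedule every event at time 0 and test whether that satisfies all constraints.
schedule = {v.nodeID: 0.0 for v in network.getAllVerts()}
print(scheduleIsValid(network, schedule))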
Code Example #2
File: empirical.py  Project: HEATlab/DCforPSTN
def sample(STN, success='default', LP='original', gauss=False):
    if LP == 'original':
        _, bounds, epsilons = originalLP(STN.copy(), naiveObj=False)
    elif LP == 'proportion':
        _, _, bounds, epsilons = proportionLP(STN.copy())
    else:
        _, _, bounds, epsilons = maxminLP(STN.copy())

    original, shrinked = newInterval(STN, epsilons)
    if not gauss:
        degree = calculateMetric(original, shrinked)[2]
    else:
        degree = calculateMetric(original, shrinked, gauss)

    schedule = {}
    for i in list(STN.verts.keys()):
        if i not in STN.uncontrollables:
            time = (bounds[(i, '-')].varValue + bounds[(i, '+')].varValue) / 2
            schedule[i] = time

    # Collect the sample data.
    count = 0
    for i in range(10000):
        if success == 'default':
            result = sampleOnce(original, shrinked, gauss)
        else:
            result = altSampleOnce(STN, schedule.copy(), gauss)
        if result:
            count += 1

    success = float(count / 10000)

    return degree, success
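A hedged driver sketch for sample() (the folder name and import locations are assumptions; the 10000-trial loop is internal to sample()):

import glob
import os
# from util import loadSTNfromJSONfile     # hypothetical import location
# from empirical import sample

for fname in glob.glob(os.path.join('networks', '*.json')):  # illustrative folder
    stn = loadSTNfromJSONfile(fname)
    degree, success = sample(stn, LP='original', gauss=False)
    print(fname, degree, success)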
Code Example #3
File: empirical.py  Project: HEATlab/DCforPSTN
def processOptimal():
    json_folder = input("Please input folder with json file:\n")
    json_list = glob.glob(os.path.join(json_folder, '*.json'))

    result = {}
    for fname in json_list:
        p, f = os.path.split(fname)
        print("Processing: ", f)

        STN = loadSTNfromJSONfile(fname)
        new_STN, count = relaxSearch(STN.copy())
        new, orig, degree = dynamicMetric(STN.copy(), new_STN.copy())

        result[f] = {}
        result[f]['shrinked'] = new
        result[f]['original'] = orig
        result[f]['degree'] = degree

    output_folder = input("Please input output folder:\n")
    filename = os.path.join(output_folder, 'result_optimal.json')

    with open(filename, 'w') as f:
        json.dump(result, f)

    return result
Code Example #4
 def _read_stn(self, file):
     network = STN()
     state = ""
     for line in file:
         if line.startswith('#'):
             if "points" in line.lower() and "num" in line.lower():
                 state = "NO_POINTS"
             elif "edges" in line.lower() and "num" in line.lower():
                 state = "NO_EDGES"
             elif "links" in line.lower():
                 state = "NO_LINKS"
             elif "names" in line.lower():
                 state = "NAMES"
             elif "edges" in line.lower():
                 state = "EDGES"
                 edge_counter = 0
             elif "links" in line.lower():
                 state = "LINKS"
             else:
                 pass
         else:
             if state == 'NO_POINTS':
                 num_points = int(line)
                 network.length = num_points
                 network.successor_edges = [{} for i in range(num_points)]
                 network.names_list = ["0" for i in range(num_points)]
             elif state == 'NO_EDGES':
                 num_edges = int(line)
             elif state == 'NO_LINKS':
                 # for testing, throw an error
                 raise Exception(
                     "Simple Temporal Networks do not have contingent links."
                 )
             elif state == 'NAMES':
                 list_of_nodes = line.split()
                 if len(list_of_nodes) != num_points:
                     raise Exception(
                         "Number of names does not match the number of nodes provided"
                     )
                 for idx, node_name in enumerate(list_of_nodes):
                     network.names_dict[node_name] = idx
                     network.names_list[idx] = node_name
             elif state == 'EDGES':
                 weights = line.split()
                 edge_counter += 1
                 # make a list of list of tuples
                 idx_node = network.names_dict[weights[0]]
                 idx_successor = network.names_dict[weights[2]]
                 network.successor_edges[idx_node][idx_successor] = int(
                     weights[1])
             elif state == 'LINKS':
                 raise Exception(
                     "Simple Temporal Networks do not have contingent links."
                 )
             else:
                 pass
     if num_edges != edge_counter:
         raise Exception(
             "Number of edges does not match the number given above")
     return network
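The text format this parser accepts can be read off the keyword checks above. Below is an illustrative input and call; the exact section-header wording, and the parser object owning _read_stn, are assumptions:

stn_text = """# Num Points
3
# Names
A B C
# Num Edges
2
# Edges
A 10 B
B 5 C
"""
# parser is a hypothetical instance of the class that defines _read_stn
network = parser._read_stn(stn_text.splitlines())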
Code Example #5
File: empirical.py  Project: HEATlab/DCforPSTN
def readNeos(filename, json_folder):
    f = open(filename, 'r')
    for i in range(3):
        line = f.readline()

    obj_value = float(line[17:])
    actual = math.exp(obj_value)

    p, f = os.path.split(filename)
    fname = f[:-4] + '.json'
    json_file = os.path.join(json_folder, fname)

    STN = loadSTNfromJSONfile(json_file)
    result, conflicts, bounds, weight = DC_Checker(STN.copy(), report=False)
    contingent = bounds['contingent']

    total = 1
    for (i, j) in list(STN.contingentEdges.keys()):
        edge = STN.contingentEdges[(i, j)]
        length = edge.Cij + edge.Cji
        total *= length

        if (i, j) not in contingent:
            actual *= length

    return actual, total, float(actual / total)
Code Example #6
File: simulation.py  Project: HEATlab/DCforPSTN
def early_execution(network: STN, realization: dict) -> bool:
    ## Bookkeeping for events
    all_uncontrollables = set(network.uncontrollables)
    unused_events = set(network.verts.keys())
    not_scheduled = PriorityQueue()
    final_schedule = {}

    # Mapping from contingent sources to uncontrollables
    contingent_pairs = network.contingentEdges.keys()
    disabled_uncontrollables = {src: sink for (src, sink) in contingent_pairs}

    # Initialize bounds for simulation - starts off with just controllables
    # and zero time point
    controllable_bounds = find_bounds(network)
    true_weight = {}
    for event in controllable_bounds:
        # Priority is the event's lower bound relative to the zero timepoint
        not_scheduled.push(event, controllable_bounds[event][0])
        true_weight[event] = controllable_bounds[event][0]
    not_scheduled.addOrDecKey(ZERO_ID, 0)

    # Run simulation
    old_time = 0
    while len(unused_events) > 0:
        current_time, activated_event = not_scheduled.pop()

        # This check ensures that we popped out a valid time_point
        # A better way to deal with this would be to just figure out a way to
        # increase priorities of elements in a heap
        if activated_event in true_weight:
            if true_weight[activated_event] > current_time:
                continue

        unused_events.remove(activated_event)
        final_schedule[activated_event] = current_time

        assert old_time <= current_time, "Chronology violated!"

        if activated_event in disabled_uncontrollables:
            # If this is a contingent source, we add the associated uncontrollable sink
            # to the queue
            uncontrollable = disabled_uncontrollables[activated_event]
            delay = realization[uncontrollable]
            not_scheduled.push(uncontrollable, current_time + delay)

        # Update the bounds for all other timepoints
        # We only care about events being moved later in time
        relevant_edges = network.getEdges(activated_event)
        for edge in relevant_edges:
            if (edge.j == activated_event) and (edge.i
                                                not in all_uncontrollables):
                if needs_early_update(edge, activated_event, current_time,
                                      true_weight):
                    lower_bound = current_time - edge.Cij
                    true_weight[edge.i] = lower_bound

        # Keep track of this for next iteration of loop
        old_time = current_time
    # Check if we dispatched successfully
    return emp.scheduleIsValid(network, final_schedule)
Code Example #7
File: simulation.py  Project: HEATlab/DCforPSTN
def find_bounds(network: STN) -> dict:
    # Add zero timepoint
    if ZERO_ID not in network.verts:
        network.addVertex(ZERO_ID)
    # Add bounds relative to zero timepoint
    adjacent_to_zero = set(network.getAdjacent(ZERO_ID))
    events = network.verts.keys()
    bounds = {}
    for event in events:
        if event in network.uncontrollables:
            # Uncontrollable events get scheduled when their contingent
            # source fires, so they are not given an initial bound here
            continue
        if (event != ZERO_ID) and (event not in adjacent_to_zero):
            # No constraint shared with the zero timepoint: leave the event
            # effectively unbounded
            bounds[event] = (0.0, float('inf'))
        else:
            # Read the bounds off the edge shared with the zero timepoint
            bounds[event] = (-network.getEdgeWeight(event, ZERO_ID),
                             network.getEdgeWeight(ZERO_ID, event))

    # To make sure zero timepoint starts first
    bounds[ZERO_ID] = (-1.0, 0.0)
    return bounds
Code Example #8
 def _read_stnu(self, file):
     network = STN()
     state = ""
     for line in file:
         if line.startswith('#'):
             if "points" in line.lower() and "num" in line.lower():
                 state = "NO_POINTS"
             elif "edges" in line.lower() and "num" in line.lower():
                 state = "NO_EDGES"
             elif "links" in line.lower():
                 state = "NO_LINKS"
             elif "names" in line.lower():
                 state = "NAMES"
             elif "edges" in line.lower():
                 state = "EDGES"
             elif "links" in line.lower():
                 state = "LINKS"
             else:
                 raise Exception("Invalid Network Type")
         else:
             if state == 'NO_POINTS':
                 num_points = int(line)
                 network.length = num_points
                 network.successor_edges = [{} for i in range(num_points)]
                 network.names_list = ["0" for i in range(num_points)]
             elif state == 'NO_EDGES':
                 num_edges = int(line)
             elif state == 'NO_LINKS':
                 no_links = int(line)
             elif state == 'NAMES':
                 list_of_nodes = line.split()
                 if len(list_of_nodes) != num_points:
                     raise Exception(
                         "Number of names does not match the number of nodes provided"
                     )
                 for idx, node_name in enumerate(list_of_nodes):
                     network.names_dict[node_name] = idx
                     network.names_list[idx] = node_name
             elif state == 'EDGES':
                 weights = line.split()
                 # make a list of list of tuples
                 idx_node = network.names_dict[weights[0]]
                 idx_successor = network.names_dict[weights[2]]
                 network.successor_edges[idx_node][idx_successor] = int(
                     weights[1])
             elif state == 'LINKS':
                 # deal with contingent links later
                 pass
              else:
                  pass
      # Contingent links are not parsed yet; return the network built so far
      return network
Code Example #9
File: stnjsontools.py  Project: HEATlab/Prob-in-Ctrl
def loadSTNfromJSONobj(jsonSTN, using_PSTN=False):
    stn = STN()

    # Add the root vertex and put it in the T_x set
    stn.addVertex(0)

    # Add the vertices
    for v in jsonSTN['nodes']:
        stn.addVertex(v['node_id'])

    # Add the edges
    for e in jsonSTN['constraints']:
        if using_PSTN and 'distribution' in e:
            stn.addEdge(e['first_node'], e['second_node'],
                        float(e['min_duration']), float(e['max_duration']),
                        e['type'], e['distribution']['name'])
        else:
            stn.addEdge(e['first_node'], e['second_node'],
                        float(e['min_duration']), float(e['max_duration']),
                        e['type'])

    return stn
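The input structure this loader expects follows from the key accesses above. A minimal hand-built example (all values are illustrative, and the distribution name format is an assumption):

jsonSTN = {
    'nodes': [{'node_id': 1}, {'node_id': 2}],
    'constraints': [
        {'first_node': 1, 'second_node': 2,
         'min_duration': 5.0, 'max_duration': 10.0,
         'type': 'stcu',
         'distribution': {'name': 'U_5_10'}},    # name format is a guess
    ],
}
stn = loadSTNfromJSONobj(jsonSTN, using_PSTN=True)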
Code Example #10
File: empirical.py  Project: HEATlab/DCforPSTN
def computeDynamic(nlp=False):
    uncertain_folder = input("Please input uncertain STNUs folder:\n")
    chain_folder = input("Please input chain STNUs folder:\n")

    listOfFile = []
    listOfFile += glob.glob(os.path.join(uncertain_folder, '*.json'))
    listOfFile += glob.glob(os.path.join(chain_folder, '*.json'))

    degree = {}
    for fname in listOfFile:
        p, f = os.path.split(fname)
        print("Processing: ", f)

        STN = loadSTNfromJSONfile(fname)
        new_STN, count = relaxSearch(STN.copy(), nlp=nlp)

        if not new_STN:
            degree[f] = 0
        else:
            degree[f] = dynamicMetric(STN.copy(), new_STN.copy())[2]

    return degree
Code Example #11
File: model.py  Project: jantje676/cross-modal
def EncoderImage(data_name, img_dim, embed_size, n_attention, n_detectors, pretrained_alex, rectangle, precomp_enc_type='basic',
                 no_imgnorm=False, net="alex", div_transform=False):
    """A wrapper to image encoders. Chooses between an different encoders
    that uses precomputed image features.
    """

    # TEST: use spatial transformers
    if precomp_enc_type == "trans":
        img_enc = STN(n_detectors, embed_size, pretrained_alex, rectangle, net)
    # basic SCAN image encoder
    elif precomp_enc_type == 'basic':
        img_enc = EncoderImagePrecomp(
            img_dim, embed_size, no_imgnorm)
    # basic SCAN image encoder with weight normalization
    elif precomp_enc_type == 'weight_norm':
        img_enc = EncoderImageWeightNormPrecomp(
            img_dim, embed_size, no_imgnorm)
    # TEST: use of attention module
    elif precomp_enc_type == "attention":
        img_enc = EncoderImageAttention(
            img_dim, embed_size, n_attention, no_imgnorm)
    # TEST: train one CNN end2end with spatial segmentations
    elif precomp_enc_type == "cnn":
        img_enc = CNN_end2end(img_dim, embed_size)
    # use of Layers-SCAN with different models
    elif precomp_enc_type == "layers":
        if net == "alex":
            img_enc = LayersModel(img_dim, embed_size)
        elif net == "res":
            img_enc = Layers_resnest(img_dim, embed_size)
        elif net == "res_deep":
            img_enc = LayersScanResDeep()
    # TEST: use layers model without padding but create equal dimension with fc-layer
    elif precomp_enc_type == "layers_same":
            img_enc = LayersModelSame(img_dim, embed_size)
    # attention module Layers-attention-SCAN
    elif precomp_enc_type == "layers_attention":
        img_enc = LayerAttention2(img_dim, embed_size, n_attention, no_imgnorm, net)
    # TEST: attention module Layers-attention-SCAN with residual image
    elif precomp_enc_type == "layers_attention_res":
        img_enc = LayerAttention3( img_dim, embed_size, n_attention, no_imgnorm)
    # TEST: with fusing Layers-attention-SCAN with prediction of clothing item
    elif precomp_enc_type == "layers_attention_im":
        img_enc = LayerAttention4(img_dim, embed_size, n_attention, no_imgnorm=False, net='alex')
    # use n CNNs in parallel with a diversity loss
    elif precomp_enc_type == "cnn_layers":
        img_enc = CNN_layers(n_detectors, embed_size, pretrained_alex, net, div_transform)
    else:
        raise ValueError("Unknown precomp_enc_type: {}".format(precomp_enc_type))

    return img_enc
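An illustrative call for the 'basic' path (all argument values are made up; on that path only img_dim, embed_size, and no_imgnorm are actually used by the constructed encoder):

img_enc = EncoderImage(data_name='f30k_precomp',    # not used by the 'basic' branch
                       img_dim=2048, embed_size=1024,
                       n_attention=0, n_detectors=0,
                       pretrained_alex=False, rectangle=False,
                       precomp_enc_type='basic')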
Code Example #12
File: dispatch.py  Project: HEATlab/DCforPSTN
def simulation(network: STN, size: int, verbose=False, gauss=False, relaxed=False) -> float:
    # Collect useful data from the original network
    contingent_pairs = network.contingentEdges.keys()
    contingents = {src: sink for (src, sink) in contingent_pairs}
    uncontrollables = set(contingents.values())

    if relaxed:
        dispatching_network, count, cycles, weights = relaxSearch(getMinLossBounds(network.copy(), 2))
        if dispatching_network is None:
            dispatching_network = network
    else:
        dispatching_network = network

    total_victories = 0
    dc_network = STNtoDCSTN(dispatching_network)
    dc_network.addVertex(ZERO_ID)

    controllability = dc_network.is_DC()
    if verbose:
        print("Finished checking DC...")

    # Detect if the network has an inconsistency in a fixed edge
    verts = dc_network.verts.keys()
    for vert in verts:
        if (vert, vert) in dc_network.edges:
            if verbose:
                print("Checking", vert)
            edge = dc_network.edges[vert, vert][0]
            if edge.weight < 0:
                dc_network.edges[(vert, vert)].remove(edge)
                dc_network.verts[vert].outgoing_normal.remove(edge)
                dc_network.verts[vert].incoming_normal.remove(edge)
                del dc_network.normal_edges[(vert, vert)]

    # Run the simulation
    for j in range(size):
        realization = generate_realization(network, gauss)
        copy = dc_network.copy()
        result = dispatch(dispatching_network, copy, realization, contingents,
                          uncontrollables, verbose)
        if verbose:
            print("Completed a simulation.")
        if result:
            total_victories += 1

    goodie = float(total_victories / size)
    if verbose:
        print(f"Worked {100*goodie}% of the time.")

    return goodie
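A hedged driver sketch (the file name, sample size, and import locations are assumptions):

# from util import loadSTNfromJSONfile     # hypothetical import location
# from dispatch import simulation

network = loadSTNfromJSONfile('example.json')    # illustrative path
success_rate = simulation(network, size=1000, verbose=True)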
Code Example #13
File: simulation.py  Project: HEATlab/DCforPSTN
def safely_scheduled(network: STN, partial: dict, event) -> bool:
    assert event in partial, "Event not in partial schedule!"
    epsilon = 0.001
    edges = network.getEdges(event)
    for edge in edges:
        if (edge.i in partial) and (edge.j in partial):
            start, end = (edge.i, edge.j)
            lBound, uBound = (-edge.Cji, edge.Cij)

            boundedAbove = (partial[end] - partial[start]) <= uBound + epsilon
            boundedBelow = (partial[end] - partial[start]) >= lBound - epsilon

            if ((not boundedAbove) or (not boundedBelow)):
                #print("Violated constraint", edge)
                return False
    return True
Code Example #14
def train():
    """Train STN"""
    params = Params()
    with tf.Graph().as_default():
        model = STN(params.gpu, params)

        saver = tf.train.Saver(tf.global_variables())
        tf_config = tf.ConfigProto(allow_soft_placement=True)
        tf_config.gpu_options.allow_growth = True
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        sess = tf.Session(config=tf_config)
        sess.run(init_op)

        initial_step = 0
        global_step = model.global_step

        ckpt = tf.train.get_checkpoint_state(params.train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            initial_step = global_step.eval(session=sess)

        tf.train.start_queue_runners(sess=sess)

        summary_writer = tf.summary.FileWriter(params.train_dir, sess.graph)
        summary_op = tf.summary.merge_all()

        for step in xrange(initial_step, params.max_steps):
            if step % params.summary_step == 0:
                op_list = [model.train_op, model.loss, summary_op]
                _, loss_value, summary_str = sess.run(op_list)
                summary_writer.add_summary(summary_str, step)
                print('loss: {}'.format(loss_value))
            else:
                _, loss_value = sess.run([model.train_op, model.loss])

            if step % 10 == 0:
                format_str = ('%s: step %d, loss = %.2f')
                print(format_str % (datetime.now(), step, loss_value))
                sys.stdout.flush()

            # Save the model checkpoint periodically.
            if step % params.checkpoint_step == 0 or (step +
                                                      1) == params.max_steps:
                checkpoint_path = os.path.join(params.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
Code Example #15
File: simulation.py  Project: HEATlab/DCforPSTN
def set_dynamic_zeropoint(network: STN):
    network = network.copy()
    largish = 1000000.0

    if ZERO_ID not in network.verts:
        network.addVertex(ZERO_ID)

    adjacent_events = set(network.getAdjacent(ZERO_ID))
    for event in network.verts:
        if (event not in adjacent_events) and (event != ZERO_ID):
            network.addEdge(ZERO_ID, event, 0.0, largish)

    return network
Code Example #16
def prepareDynamic(STN):
    epsilons = {}
    result, conflicts, bounds, weight = DC_Checker(STN.copy())

    contingent = bounds['contingent']

    constraint = ''
    Obj = ''
    for i, j in list(contingent.keys()):
        edge, bound = contingent[(i, j)]
        length = edge.Cij + edge.Cji
        epsilons[j] = ('EPS_%i' % j, 0, length)

        constraint += epsilons[j][0] + ' + '
        Obj += 'log(' + str(length) + ' - ' + epsilons[j][0] + ') + '

    constraint = constraint[:-3] + ' >= ' + str(-weight)
    Obj = Obj[:-3]

    return epsilons, constraint, Obj
Code Example #17
def modelObjDynamic(STN, fname):
    epsilons, constraint, Obj = prepareDynamic(STN.copy())

    f = open(fname, 'w')
    for v, l, h in list(epsilons.values()):
        line = 'var ' + v + ' >= ' + str(l)

        if h is not None:
            line += ', <= ' + str(h)

        line += ';'
        f.write(line + '\n')

    Obj_line = '\nmaximize VOLUME: ' + Obj + ';\n\n'
    f.write(Obj_line)

    constraint_line = 'subject to WEIGHT: ' + constraint + ';\n'
    f.write(constraint_line)

    f.close()
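For reference, with two contingent edges ending at events 2 and 4, the model file written above would look like the following (the bounds 20 and 15 and the weight 12 are illustrative numbers, not computed from a real network):

var EPS_2 >= 0, <= 20;
var EPS_4 >= 0, <= 15;

maximize VOLUME: log(20 - EPS_2) + log(15 - EPS_4);

subject to WEIGHT: EPS_2 + EPS_4 >= 12;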
Code Example #18
File: empirical.py  Project: HEATlab/DCforPSTN
def compare(actual_Dict):
    dynamic_folder = input("Please input directory with DC STNUs:\n")
    uncertain_folder = input("Please input directory with uncertain STNUs:\n")

    compare_Dict = {}
    for x in list(actual_Dict.keys()):
        actual_volume = actual_Dict[x]

        if x[:7] == 'dynamic':
            fname = os.path.join(dynamic_folder, x + '.json')
        else:
            fname = os.path.join(uncertain_folder, x + '.json')

        STN = loadSTNfromJSONfile(fname)

        _, _, epsilons = originalLP(STN.copy())
        original, shrinked = newInterval(STN, epsilons)

        old, new, degree = calculateMetric(original, shrinked)
        actual = float(actual_volume / old)
        compare_Dict[x] = (degree, actual)

    return compare_Dict
Code Example #19
File: train.py  Project: liuf1989/TFSTN
def train():
    """Train STN"""
    # load data
    print("loading MNIST dataset...")
    trainData, validData, testData = loadMNIST("data/MNIST.npz")
    batch_size = 50
    with tf.Graph().as_default():
        model = STN(FLAGS.gpu)

        saver = tf.train.Saver(tf.global_variables())
        tfConfig = tf.ConfigProto(allow_soft_placement=True)
        tfConfig.gpu_options.allow_growth = True
        init = tf.global_variables_initializer()
        sess = tf.Session(config=tfConfig)
        sess.run(init)

        initial_step = 0
        global_step = model.global_step

        ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            initial_step = global_step.eval(session=sess)

        tf.train.start_queue_runners(sess=sess)

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
        summary_op = tf.summary.merge_all()

        for step in xrange(initial_step, FLAGS.max_steps):
            start_time = time.time()

            # generate training data
            rand_idx = np.random.randint(len(trainData["image"]),
                                         size=batch_size)
            image_per_batch = trainData["image"][rand_idx]
            label_per_batch = trainData["label"][rand_idx]
            image_per_batch = np.reshape(image_per_batch,
                                         [batch_size, 28, 28, 1])
            feed_dict = {
                model.image_input: image_per_batch,
                model.labels: label_per_batch,
            }

            if step % FLAGS.summary_step == 0:
                op_list = [model.train_op, model.loss, summary_op]
                _, loss_value, summary_str = sess.run(op_list,
                                                      feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                print('loss: {}'.format(loss_value))
            else:
                _, loss_value = sess.run([model.train_op, model.loss],
                                         feed_dict=feed_dict)

            duration = time.time() - start_time

            if step % 10 == 0:
                num_images_per_step = batch_size
                images_per_sec = num_images_per_step / duration
                sec_per_batch = float(duration)
                format_str = (
                    '%s: step %d, loss = %.2f (%.1f images/sec; %.3f '
                    'sec/batch)')
                print(format_str % (datetime.now(), step, loss_value,
                                    images_per_sec, sec_per_batch))
                sys.stdout.flush()

            # Save the model checkpoint periodically.
            if step % FLAGS.checkpoint_step == 0 or (step +
                                                     1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
Code Example #20
File: stnjsontools.py  Project: HEATlab/durability
def loadSTNfromJSONobj(jsonSTN, reduction=True, using_PSTN=True):
    stn = STN()

    # Add the root vertex and put it in the T_x set
    stn.addVertex(0, 0, None)
    # TODO: wtf? Why are we executing a point outside of a simulation in the
    # first place?
    stn.execute(0)
    agents = []

    # Add the vertices
    for v in jsonSTN['nodes']:
        # Accumulate a list of all the owners to retrieve the agents.
        if not v['owner_id'] in agents:
            agents.append(v['owner_id'])

        # We don't necessarily need a location; just set it to None if we don't have one.
        if not ('location' in v):
            v['location'] = None

        stn.addVertex(v['node_id'], v['local_id'], v['owner_id'],
                      v['location'])

        # TODO: Change edge adding to allow integers to refer to vertices,
        # rather than actually connecting integers together. (silly legacy support)
        stn.addEdge(0, v['node_id'], float(v['min_domain']),
                    float(v['max_domain']))
        if 'executed' in v:
            if v['executed']:
                stn.execute(v['node_id'])

    # Add the edges
    for e in jsonSTN['constraints']:
        if 'distribution' in e and using_PSTN:
            stn.addEdge(e['first_node'], e['second_node'],
                        float(e['min_duration']), float(e['max_duration']),
                        e['distribution']['name'])
        else:
            stn.addEdge(e['first_node'], e['second_node'],
                        float(e['min_duration']), float(e['max_duration']))

    #numAgents = jsonSTN['num_agents']

    stn.numAgents = len(agents)  # NOTE: deprecated, agents replaces numAgents.
    stn.agents = agents

    if reduction:
        # Triangulate the STN
        stnCopy = stn.copy()
        agentWait = []
        # Set up the wait timer for agent load balancing
        for a in stn.agents:
            agentWait.append(0)

        # Perform the reduction
        while len(stnCopy.verts) > 1:
            for a in stn.agents:
                if agentWait[a] == 0:
                    created = stnreduce(stnCopy, a, stn)
                    agentWait[a] = created
                else:
                    agentWait[a] -= 1

    # Return a dictionary for easy labelling of return types.
    # FIXME: Remove the deprecated numAgents value
    output_dict = {'stn': stn, 'agent_count': stn.numAgents}

    # Ideally, this would return a class, however, this is already butchering
    # quite a bit of legacy support, and a class would end up being even more
    # damaging.
    return output_dict
Code Example #21
File: stnjsontools.py  Project: HEATlab/DCforPSTN
def loadSTNfromJSONobj(jsonSTN, using_PSTN=True):
    stn = STN()

    # Add the root vertex and put it in the T_x set
    stn.addVertex(0)

    # Add the vertices
    for v in jsonSTN['nodes']:
        stn.addVertex(v['node_id'])
        if 'min_domain' in v:
            stn.addEdge(0, v['node_id'], float(v['min_domain']),
                        float(v['max_domain']))
        else:
            if not stn.edgeExists(0, v['node_id']):
                stn.addEdge(0, v['node_id'], float(0), float('inf'))


    # Add the edges
    for e in jsonSTN['constraints']:
        if stn.edgeExists(e['first_node'], e['second_node']):
            stn.updateEdge(e['first_node'], e['second_node'],
                           float(e['max_duration']))
            stn.updateEdge(e['second_node'], e['first_node'],
                           float(e['min_duration']))
        else:
            if using_PSTN and 'distribution' in e:
                stn.addEdge(e['first_node'], e['second_node'],
                            float(max(0,e['min_duration'])), float(e['max_duration']),
                            e['distribution']['type'], e['distribution']['name'])
            elif 'type' in e:
                if e['type'] == 'stcu':
                    dist = "U_"+str(e['min_duration']) + "_" + str(e['max_duration'])
                    stn.addEdge(e['first_node'], e['second_node'],
                        float(max(0,e['min_duration'])), float(e['max_duration']),
                        e['type'], dist)
                else:
                    stn.addEdge(e['first_node'], e['second_node'],
                                float(e['min_duration']), float(e['max_duration']),
                                e['type'])
            else:
                stn.addEdge(e['first_node'], e['second_node'],
                            float(e['min_duration']), float(e['max_duration']))

    return stn
Code Example #22
def prepare(STN):
    bounds = {}
    epsilons = {}
    constraints = {}

    for i in STN.verts:
        if STN.getEdgeWeight(0, i) == float('inf'):
            STN.setMakespan(MAX_FLOAT)
            break

    for i in list(STN.verts.keys()):
        bounds[(i, '+')] = ('T%iHI' % i, 0, STN.getEdgeWeight(0, i))

        low = 0 if STN.getEdgeWeight(i,0) == float('inf') else\
                            -STN.getEdgeWeight(i,0)

        bounds[(i, '-')] = ('T%iLO' % i, low, None)

        if i in STN.uncontrollables:
            epsilons[(i, '-')] = ('EPS%iLO' % i, 0, None)
            epsilons[(i, '+')] = ('EPS%iHI' % i, 0, None)
            constraints[i] = bounds[(i, '-')][0] + " <= " + bounds[(i, '+')][0]
        elif i == 0:
            constraints[(i, '-')] = bounds[(i, '-')][0] + " == 0"
            constraints[(i, '+')] = bounds[(i, '+')][0] + " == 0"
        else:
            constraints[i] = bounds[(i, '-')][0] + " == " + bounds[(i, '+')][0]

    for i, j in STN.edges:
        if (i, j) in STN.contingentEdges:

            constraints[(i, j, '+')] = bounds[(j, '+')][0] + ' - ' + \
                bounds[(i, '+')][0] + ' == ' + str(STN.getEdgeWeight(i,j)) \
                + ' - ' + epsilons[(j, '+')][0]
            constraints[(i, j, '-')] = bounds[(j, '-')][0] + ' - ' + \
                bounds[(i, '-')][0] + ' == ' + str(-STN.getEdgeWeight(j,i))\
                + ' + ' + epsilons[(j, '-')][0]

        else:
            # NOTE: We need to handle the infinite weight edges. Otherwise
            #       the LP would be infeasible
            high = MAX_FLOAT if STN.getEdgeWeight(i,j) == float('inf') \
                                            else STN.getEdgeWeight(i,j)
            low = MAX_FLOAT if STN.getEdgeWeight(j,i) == float('inf') \
                                            else STN.getEdgeWeight(j,i)

            constraints[(i, j, '+')] = bounds[(j, '+')][0] + ' - ' + \
                                        bounds[(i, '-')][0] + ' <= ' + str(high)
            constraints[(i, j, '-')] = bounds[(i, '+')][0] + ' - ' + \
                                        bounds[(j, '-')][0] + ' <= ' + str(low)

    Obj = ''
    for i in STN.uncontrollables:
        Obj += 'log(' + bounds[(i, '+')][0] + ' - ' + bounds[(i, '-')][0] \
                                                                        + ') + '
    Obj = Obj[:-3]

    return bounds, epsilons, constraints, Obj
Code Example #23
File: empirical.py  Project: HEATlab/DCforPSTN
def generateParallelChain(agent, task):
    total_event = ((2 * task) + 1) * agent + 1

    while True:
        new = STN()
        new.addVertex(0)

        for i in range(total_event):
            new.addVertex(i + 1)

        contingent = True
        for i in range(agent):
            start = ((2 * task) + 1) * i + 1
            end = ((2 * task) + 1) * (i + 1)
            new.addEdge(0, start, 0, 15)

            for j in range(start, end):
                type = 'stcu' if contingent else 'stc'
                contingent = not contingent

                if type == 'stcu':
                    # low = round(random.uniform(10, 20), 2)
                    # high = round(random.uniform(30, 40), 2)
                    low = random.randint(10, 20)
                    high = random.randint(30, 40)
                    new.addEdge(j, j + 1, low, high, type='stcu')
                else:
                    # low = round(random.uniform(5, 10), 2)
                    # high = round(random.uniform(30, 35), 2)
                    low = random.randint(5, 10)
                    high = random.randint(35, 40)
                    new.addEdge(j, j + 1, low, high)

            new.addEdge(end, total_event, -10, 10)

        num_activity = (2 * task) + 1
        max_length = max([e.Cij + e.Cji for e in list(new.edges.values())])
        up_bound = max_length * num_activity

        # low = round(random.uniform(0.35*up_bound, 0.45*up_bound), 2)
        # high = round(random.uniform(0.5*up_bound, 0.6*up_bound), 2)
        low = random.randint(int(0.45 * up_bound), int(0.53 * up_bound))
        high = random.randint(int(0.55 * up_bound), int(0.65 * up_bound))
        new.addEdge(0, total_event, low, high)

        print("\n\nChecking consistensy...")
        if not new.isConsistent():
            continue

        print("Checking Dynamic Controllability...")
        try:
            result, conflicts, bounds, weight = DC_Checker(new.copy(),
                                                           report=False)
        except Exception:
            continue

        if result:
            return new
Code Example #24
File: empirical.py  Project: HEATlab/DCforPSTN
def generateChain(task, free):
    totalEvent = 2 * (task + 1)

    while True:
        new = STN()
        for i in range(totalEvent):
            new.addVertex(i)

        L = [random.randint(0, 100) for i in range(task)]
        s = sum(L)
        L = [int(x / s * free) for x in L]
        diff = free - sum(L)
        L[-1] += diff

        bounds = []
        for i in range(totalEvent - 1):
            type = 'stcu' if i % 2 == 0 else 'stc'
            if type == 'stcu':
                lowBound = random.randint(0, 50)
                length = random.randint(1, 50)
                bounds.append((lowBound, lowBound + length))
                new.addEdge(i, i + 1, lowBound, lowBound + length, type='stcu')
            else:
                lowBound = random.randint(0, 100)
                length = L[int((i - 1) / 2)]
                bounds.append((lowBound, lowBound + length))
                new.addEdge(i, i + 1, lowBound, lowBound + length)

        low = sum([x[0] for x in bounds])
        high = sum([x[1] for x in bounds])
        S = sum([e.Cij + e.Cji for e in list(new.contingentEdges.values())])
        # makespan = random.randint(int(0.5*low), low)
        makespan = low + int(0.6 * S)
        print(low, makespan, high)
        new.addEdge(0, task * 2 + 1, 0, makespan)

        if new.isConsistent():
            return new
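A hedged usage sketch for the two generators above (parameter values are illustrative; reading free as slack distributed over the requirement edges is inferred from the code):

# from empirical import generateChain, generateParallelChain   # hypothetical imports

chain_stn = generateChain(task=5, free=100)
parallel_stn = generateParallelChain(agent=2, task=3)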
Code Example #25
File: train.py  Project: Comet2dh/Baseline_v1
          round(sum(rmse) / 8.0, 4))
    print()

if __name__ == '__main__':
    cv2.setNumThreads(0)
    opt = parse_args()
    print(opt)
    Path(opt.ckpt_path).mkdir(parents=True, exist_ok=True)

    train_set = StereoDataset(opt.data_path, opt.list_path, opt.train_split)
    train_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batch_size, shuffle=True)
    test_set = StereoDataset(opt.data_path, opt.list_path, opt.test_split)
    test_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.batch_size, shuffle=False)

    dpnet = DPN(in_shape=(train_set.height, train_set.width))
    stnet = STN(in_shape=(test_set.height, test_set.width), filt=not opt.no_filt)

    dpnet = dpnet.cuda()
    stnet = stnet.cuda()
    dpnet = nn.DataParallel(dpnet, device_ids=[0, 1])
    stnet = nn.DataParallel(stnet, device_ids=[0, 1])

    dpn_optim = optim.Adam(dpnet.parameters(), lr=opt.lr, weight_decay=opt.decay)
    stn_optim = optim.Adam(stnet.parameters(), lr=opt.lr, weight_decay=opt.decay)

    dpn_sched = optim.lr_scheduler.StepLR(dpn_optim, opt.step, gamma=opt.gamma)
    stn_sched = optim.lr_scheduler.StepLR(stn_optim, opt.step, gamma=opt.gamma)

    # vis = Visualizer(server=opt.server, env=opt.env)

    if opt.resume: