Code example #1
    def dexec(self, cmd):
        """wrapper around docker exec"""

        #docker exec needs cmd as separate args, not a single string
        cmd = 'docker exec -d $self.id ' + cmd

        r(cmd)
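The `r()` helper used throughout these examples is project-specific; it appears to run a shell command with `$var`-style substitution. A minimal stand-alone sketch of the same call using only the standard library, with the command split into separate arguments as the comment above requires (`dexec_standalone` is a hypothetical name, not part of the project):

import shlex
import subprocess

def dexec_standalone(container_id, cmd):
    # docker exec wants the command as separate argv entries, so split
    # the string with shlex instead of passing it whole
    args = ['docker', 'exec', '-d', container_id] + shlex.split(cmd)
    subprocess.run(args, check=True)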
Code example #2
File: pearls.py Project: dthemg/PEARLS
    def _save_history(self, idx) -> None:
        """Save results
		idx: 	Signal sample iteration index
		"""
        self.w_hat_hist[:, idx] = r(self.w_hat)
        self.freq_hist[:, idx] = r(self.f_mat[:, 0])
        self.rls_hist[:, idx] = r(self.rls)
        self.p1_hist[idx] = self.p1
        self.p2_hist[idx] = self.p2
Code example #3
File: pearls.py Project: dthemg/PEARLS
    def _update_a(self, fs_updated: bool) -> None:
        """Update a vector and A matrix
		f_updated:  If updates has been done to the frequency matrix
		"""
        if fs_updated:
            self.A = np.exp(as_col(self.tvec) * 2 * np.pi * 1j * r(self.f_mat))
            self.a = as_col(self.A[-1, :])
        else:
            tval = self.t[self.t_stop - 1]
            self.a = np.exp(as_col(2 * np.pi * 1j * r(self.f_mat)) * tval)
            self.A = np.roll(self.A, -1, axis=0)
            self.A[-1, :] = r(self.a)
Code example #4
    def __del__(self):
        """stop and delete the container"""

        r('docker rm -f $self.id')

        try:
            #kill container and remove if it isn't a 'root' container
            self.mnt_fd.close()
            ns_root.ns.remove(self)
            #r('docker kill $self.id')
            #r('docker rm -f $self.id')

            os.remove(self.var_run)
        except Exception:
            pass
Code example #5
def get_volume(me, debug=True):

    total_volume = 0
    M = [[5, 5, 5], [11, 2, 2], [2, 11, 2], [2, 2, 11]]
    W = [-27 / 96., 25 / 96., 25 / 96., 25 / 96.]

    # get non-manifold shells
    non_manifold_shells, union_find = get_non_manifold_shells(me)

    for polygon in me.polygons:

        # skip polygons that belong to a non-manifold shell, identified via the
        # shell of one of the polygon's vertices; find() takes amortized
        # near-constant time thanks to union-find path compression
        if union_find.find(polygon.vertices[0]) in non_manifold_shells:
            continue

        n = len(polygon.vertices)
        V = [me.vertices[polygon.vertices[i]] for i in range(n)]

        i = 0
        j = 1
        for k in range(2, n):
            nx, ny, nz = (V[j].co - V[i].co).cross(V[k].co - V[j].co)

            for p in range(4):
                x, y, z = (M[p][0] * V[i].co + M[p][1] * V[j].co +
                           M[p][2] * V[k].co) / 15.
                total_volume += W[p] * (x * nx + y * ny + z * nz) / 3.

            j = k

    if debug:
        print("Total volume: " + str(r(abs(total_volume))))
Code example #6
def train():
    start_time = datetime.now()
    print("Getting training data took %s " % utils.r(start_time))
    feed_dict = {
        graph['keep_nodes_probabilities']: 0.8,
        graph['is_training']: True
    }
    _, loss, train_summary, step = sess.run([
        graph['train_op'], graph['loss'], graph['merged_summary_op'],
        graph['step']
    ],
                                            feed_dict=feed_dict)
    train_writer.add_summary(train_summary, step)
    logger.info("Step #%s took %s. Loss: %f \n" %
                (step, utils.r(start_time), loss))
    return step
Code example #7
File: ga.py Project: WintersLt/fss16ppp
def uniform_crossover(mom, dad):
    n = len(mom)
    out = []
    for i in range(n):
        out.append(mom[i] if utils.r() < 0.5 else dad[i])
    return out
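`utils.r()` here evidently returns a uniform random float in [0, 1). A self-contained version with `random.random` standing in for it (the stand-in is an assumption, not the project's utils module):

import random

def uniform_crossover(mom, dad, r=random.random):
    # each gene is inherited from either parent with equal probability
    return [m if r() < 0.5 else d for m, d in zip(mom, dad)]

print(uniform_crossover([1, 2, 3, 4], [9, 8, 7, 6]))  # e.g. [1, 8, 7, 4]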
Code example #8
def get_area(me, debug=True):

    total_area = 0
    total_blender = 0

    for polygon in me.polygons:
        
        area_polygon = get_polygon_area(me, polygon, debug)
        
        total_area += area_polygon/2
        total_blender += polygon.area

    if debug:
        print("************************************")
        print("Total surface area (own method): " + str(r(total_area)))
        print("Total surface area (blender attribute): " + str(r(total_blender)))
Code example #9
def main():
    # Retrieve the active object (the last one selected)
    ob = bpy.context.active_object

    # Check that it is indeed a mesh
    if not ob or ob.type != 'MESH':
        print("Active object is not a MESH! Aborting...")
        return

    # If we are in edit mode, return to object mode
    bpy.ops.object.mode_set(mode='OBJECT')

    # Retrieve the mesh data
    mesh = ob.data

    # Get current time
    t = time()

    # Function that does all the work
    valence_avg, valence_max, valence_min = [r(v) for v in get_valences(mesh)]
    print("\n--------------- VALENCES ---------------")
    print("Average Valence: " + str(valence_avg))
    print("Max Valence: " + str(valence_max))
    print("Min Valence: " + str(valence_min))
    print("----------------------------------------------")

    # Report performance...
    print("Script took %6.3f secs.\n\n" % (time() - t))
Code example #10
def main():
    # Retrieve the active object (the last one selected)
    ob = bpy.context.active_object

    # Check that it is indeed a mesh
    if not ob or ob.type != 'MESH':
        print("Active object is not a MESH! Aborting...")
        return

    # If we are in edit mode, return to object mode
    bpy.ops.object.mode_set(mode='OBJECT')

    # Retrieve the mesh data
    mesh = ob.data

    # Get current time
    t = time()

    # Function that does all the work
    centroid = [r(c) for c in get_centroid(mesh)]
    print("\n--------------- CENTROID -----------------")
    print("Centroid: " + str(centroid))
    print("\n---------------------------------------")

    # Report performance...
    print("Script took %6.2f secs.\n\n" % (time() - t))
Code example #11
def mwsfiddle(old, problem):
    '''Probabilistically either modifies a random decision or does local search'''
    # Pick a decision at random
    new = old[:]  # copy
    pos = random.randint(0, len(old) - 1)
    # check probability
    if utils.r() > 0.5:
        # mutate new[pos] until you obtain a valid mutation satisfying all constraints
        return mutate_one(problem, new, pos)
    return local_search(problem, new, pos)
Code example #12
def get_polygon_area(me, polygon, debug=True):
    # Get polygon vertices
    vidx = polygon.vertices
    len_vertices = len(vidx)

    # Sum the vector areas of the triangles joining the origin and two
    # consecutive vertices: Area_OAB = 1/2 * (OA x OB), where A and B are
    # consecutive vertices. Summing over all triangles handles concave
    # polygons as well: Area = 1/2 * |sum over i of (v_i x v_(i+1))|
    cross_comps = [me.vertices[vidx[i]].co.cross(me.vertices[vidx[(i+1) % len_vertices]].co) for i in range(len_vertices)]
    area_polygon = mathutils.Vector()
    for i in range(len_vertices):
        area_polygon += cross_comps[i]
    
    if debug:
        print("Polygon area (own method): " + str(r(area_polygon.length/2)))
        print("Polygon area (blender attribute): " + str(r(polygon.area)))

    return area_polygon.length
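A quick numeric check of the cross-product sum outside Blender (plain numpy standing in for mathutils): for a unit square in the XY plane the summed cross products have magnitude 2, and half of that recovers the area 1.

import numpy as np

square = [np.array(p, dtype=float)
          for p in [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)]]
total = sum(np.cross(square[i], square[(i + 1) % 4]) for i in range(4))
print(np.linalg.norm(total) / 2)  # -> 1.0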
Code example #13
File: pearls.py Project: dthemg/PEARLS
    def dictionary_update(self):
        """Refine frequency estimates"""
        rls_mat = self.rls.reshape((self.P, self.H))
        norms = np.linalg.norm(rls_mat, axis=1)
        max_norm = max(norms)
        sig_pitches = r(np.argwhere(norms > max_norm * 0.05))

        for p in sig_pitches:
            gp = self._Gp(p)

            # Estimate num harmonics
            p_rls = rls_mat[p, :]
            H = np.max(np.argwhere(abs(p_rls) > np.max(abs(p_rls)) * 0.2))
Code example #14
    def load_char_label_dico(filePath):

        print("Loading CharLabelDico ... ")
        start_time = datetime.now()
        charLabelMap = {}
        with codecs.open(filePath, 'r', 'gb2312') as f:
            for line in f:
                lineWithoutCR = line.split("\n")[0]
                splitted = lineWithoutCR.split(" ")
                char = splitted[0]
                label = int(splitted[1])
                charLabelMap[char] = label
        print("Execution time: %s s." % utils.r(start_time))
        return charLabelMap
Code example #15
    def __init__(self, name, image):

        self.nics = []
        self.name = name

        #start the container and record the container id; the random sleep
        #below was an attempt to improve performance at start
        #time.sleep(random.uniform(1,3))
        self.id = r(
            'docker run -id --privileged --name $name --hostname $name --net=none $image'
        ).strip()
        self.pid = r(
            "docker inspect -f '{{.State.Pid}}' $self.id").strip().strip("'")

        self.proc_path = '/proc/%s/ns/' % self.pid
        self.mnt_fd = open(self.proc_path + 'mnt', 'r')
        self.var_run = '/var/run/netns/' + self.name

        if not os.path.exists('/var/run/netns'):
            os.mkdir('/var/run/netns')

        netns = self.proc_path + 'net'
        #link this to /var/run/netns so ip tool can identify the network ns
        r('ln -s $netns $self.var_run')
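The final `ln -s` is the standard trick for making a container's network namespace visible to the `ip netns` tool: `/proc/<pid>/ns/net` is symlinked into `/var/run/netns/`. The same step can be done with the standard library alone (a sketch; `expose_netns` is a hypothetical helper, not part of the project):

import os

def expose_netns(pid, name):
    # make /proc/<pid>/ns/net visible to `ip netns` as /var/run/netns/<name>
    os.makedirs('/var/run/netns', exist_ok=True)
    os.symlink('/proc/%s/ns/net' % pid, '/var/run/netns/%s' % name)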
Code example #16
def extrapolate(frontier, one, f, cf, problem):
    out = copy.deepcopy(one)
    two, three, four = threeOthers(frontier, one)
    changed = False
    for d in range(len(problem.decisions)):
        x, y, z = two[d], three[d], four[d]
        if utils.r() < cf:
            changed = True
            new = x + f * (y - z)
            out[d] = trim(new, d, problem)  # keep in range
    if not changed:
        d = random.randint(0, len(problem.decisions) - 1)
        out[d] = two[d]
    return out
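This is the classic differential-evolution step (DE/rand/1 with binomial crossover): each decision is replaced by x + f * (y - z) with probability cf. A vectorized numpy sketch of the same step, with `trim` replaced by a plain clip to assumed bounds and the force-one-change fallback omitted:

import numpy as np

def de_step(one, two, three, four, f=0.75, cf=0.3, lo=0.0, hi=1.0):
    one, two, three, four = map(np.asarray, (one, two, three, four))
    mask = np.random.random(one.shape) < cf        # which decisions mutate
    return np.where(mask, np.clip(two + f * (three - four), lo, hi), one)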
Code example #17
def plot_results(signal: np.ndarray, results: dict, P: Pearls,
                 true_freq: float, true_H: int):
    fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3, 2)

    t = np.arange(len(signal)) / P.fs

    ax3.plot(t, np.real(signal))

    w_hat_hist = results["w_hat_hist"]
    freq_hist = results["freq_hist"]
    rls_hist = results["rls_hist"]

    def get_weights(arr):
        return np.linalg.norm(arr.reshape(P.P, P.H), axis=1)

    p_weights = np.apply_along_axis(get_weights, 0, w_hat_hist)
    rls_weights = np.apply_along_axis(get_weights, 0, rls_hist)

    # Weight history
    ax1.plot(t, p_weights.T)
    ax2.plot(t, freq_hist.T)

    # Penalty parameters hist
    ax4.plot(t, rls_weights.T)

    # Final prediction
    w_hat_final = w_hat_hist[:, -1].reshape(P.P, P.H)
    freq_final = r(freq_hist[:, -1])
    pred_signal = np.zeros(P.L, dtype="complex")

    for i, row in enumerate(w_hat_final):
        freq = freq_final[i]
        for h in range(P.H):
            pred_signal += row[h] * np.exp(1j * 2 * np.pi * t * freq * (h + 1))

    ax5.plot(t, np.real(pred_signal))

    # Fourier transform of signal
    s_fft = np.fft.fft(signal)[:len(signal) // 2 + 1]
    f_ax = np.linspace(0, P.fs / 2, num=(len(s_fft)))
    ax6.plot(f_ax, np.abs(s_fft))
    for i in range(true_H):
        ax6.axvline(x=true_freq * (i + 1), color="r")
    ax6.set_xlim([0, 3000])

    plt.show()
Code example #18
def convert_gnt_to_png(gnt_dir, png_dir, char_label_dico):

    start_time = datetime.now()
    i = 0
    for file_name in os.listdir(gnt_dir):
        file_path = os.path.join(gnt_dir, file_name)
        gnt_file = open(file_path, "r")
        for image, tag_code in extract_image_and_tag_from_gnt_file(gnt_file):
            i += 1
            tag_code_uni = struct.pack('>H', tag_code).decode(
                'gb2312')  # chinese character
            character_dir = png_dir + "/" + '%0.5d' % char_label_dico[
                tag_code_uni]
            # character_dir examples : '00000', '00001', '00002'...
            # character_dir is a dir that contains all the 240 images of a given character
            os.makedirs(character_dir, exist_ok=True)
            image_name = str(i) + ".png"
            cv2.imwrite(character_dir + '/' + image_name, image)
        gnt_file.close()
    print("Execution time: %s ." % utils.r(start_time))
    return i
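The `struct.pack('>H', tag_code).decode('gb2312')` line converts the two-byte big-endian GB2312 tag code stored in the GNT file into the character it encodes, for example:

import struct

# 0xB0A1 is the first level-1 hanzi code point in GB2312
print(struct.pack('>H', 0xB0A1).decode('gb2312'))  # -> 啊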
Code example #19
    def connect(self, container):
        """This will create a ethernet connection to another ns"""

        #creating a local var for the r() call
        pid = container.pid

        #nics are named after the container name + _number
        tmp_n = len(container.nics)

        #nicname = self.name + '_' + str(tmp_n)
        nicname = container.name + '_' + str(tmp_n)

        r('ip link add $nicname type veth peer name tmp')
        r('ip link set tmp netns $self.pid')
        r('ip link set $nicname netns $pid')

        #need to research more, but pretty sure checksum offloading was
        #screwing up udp packets.....
        #http://lists.thekelleys.org.uk/pipermail/dnsmasq-discuss/2007q3/001506.html
        #this disables offloading....
        if self.name != 'root':
            r('ip netns exec $self.name ethtool -K tmp rx off tx off')

        self.enter_ns()
        ###########################################

        #rename tmp to match veth peer in other ns
        r('ip link set dev tmp name $nicname')
        r('ethtool -K $nicname rx off tx off')

        self.exit_ns()

        #now append the nics to our list and the other containers
        self.nics.append(nicname)
        container.nics.append(nicname)
        return nicname
Code example #20
    def setup_wifi(self, phy):
        """mov phy into this containers network namespace"""

        r('iw phy $phy set netns $self.pid')
Code example #21
def training():
    training_data = Data(data_dir=Data.DATA_TRAINING)
    test_data = Data(data_dir=Data.DATA_TEST)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                          allow_soft_placement=True,
                                          log_device_placement=True)) as sess:

        training_init_op = training_data.get_batch(batch_size=FLAGS.batch_size,
                                                   aug=True)
        next_training_sample = training_data.get_next_element()
        graph = build_graph(top_k=1,
                            images=next_training_sample[0],
                            labels=next_training_sample[1])
        sess.run(tf.global_variables_initializer())
        coordinator = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coordinator)
        saver = tf.train.Saver()
        train_writer = tf.summary.FileWriter(LOG_DIR + '/training', sess.graph)
        test_writer = tf.summary.FileWriter(LOG_DIR + '/test')
        start_step = 0
        if FLAGS.restore:
            ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
            if ckpt:
                saver.restore(sess, ckpt)
                print("restore from the checkpoint %s" % ckpt)
                start_step += int(ckpt.split('-')[-1])

        logger.info('Start training')
        logger.info("Training data size: %d" % training_data.size)
        logger.info("Test data size: %d" % test_data.size)
        print("Getting training data...")

        # variables were already initialized above (and possibly restored from
        # a checkpoint, which a second initializer run would overwrite)
        sess.run(training_init_op)

        def train():
            start_time = datetime.now()
            print("Getting training data took %s " % utils.r(start_time))
            feed_dict = {
                graph['keep_nodes_probabilities']: 0.8,
                graph['is_training']: True
            }
            _, loss, train_summary, step = sess.run([
                graph['train_op'], graph['loss'], graph['merged_summary_op'],
                graph['step']
            ],
                                                    feed_dict=feed_dict)
            train_writer.add_summary(train_summary, step)
            logger.info("Step #%s took %s. Loss: %d \n" %
                        (step, utils.r(start_time), loss))
            return step

        start_time = datetime.now()
        eval_frequency = FLAGS.evaluation_step_frequency
        while not coordinator.should_stop():
            step = train()
            if step > FLAGS.max_steps:
                break

            if (step % eval_frequency == 0) and (step >= eval_frequency):
                feed_dict = {
                    graph['keep_nodes_probabilities']: 1.0,
                    graph['is_training']: False
                }
                accuracy_test, test_summary = sess.run(
                    [graph['accuracy'], graph['merged_summary_op']],
                    feed_dict=feed_dict)
                test_writer.add_summary(test_summary, step)
                logger.info('---------- Step #%d   Test accuracy: %.2f ' %
                            (int(step), accuracy_test))
            if step % FLAGS.saving_step_frequency == 1:
                logger.info('Saving checkpoint of step %s' % step)
                saver.save(sess,
                           os.path.join(FLAGS.checkpoint_dir,
                                        'online_hanzi_recog'),
                           global_step=graph['step'])

        logger.info('Training completed in %s' % utils.r(start_time))
        coordinator.request_stop()
        train_writer.close()
        test_writer.close()
        saver.save(sess,
                   os.path.join(FLAGS.checkpoint_dir, 'online_hanzi_recog'),
                   global_step=graph['step'])
        coordinator.join(threads)
        sess.close()
Code example #22
def any(self):
    return self.low + utils.r() * (self.high - self.low)