Example #1
    def get_solution(self) -> Graph:
        if self._graph.is_empty():
            return Graph()
        solver = cp_model.CpSolver()
        solver.parameters.max_time_in_seconds = 60.0
        status = solver.Solve(self._model)
        if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
            return self._solution_to_graph(solver)
        else:
            return Graph()
Example #2
    def __init__(self, view):
        self.__view = view
        self.__file_view = FileReader()
        self.__database_view = DatabaseView()
        self.__graph_view = Graph()
        self.__query_creator = QueryCreator()
        self.__logger = Logger()
        with open('src/config.json') as json_data_file:
            data = json.load(json_data_file)
            self.__serial_file = data['pickle']['file']
            self.__database_name = data['mysql']['db']
Example #3
def test_one_edge(concomp):
    msg = "{} failed".format(concomp.__name__)

    g = Graph()

    g.add_edge(0, 1)

    cc = concomp(g)

    assert len(cc) == 2, msg
    assert cc[0] == cc[1], msg
Example #4
File: io.py Project: 2020saurav/DGA
def readInput():
    """First line contains two arguments
    n = number of vertices
    m = number of edges in the graph
    next m line conatins (a,b) reresenting an edge."""
    n, m = map(int, raw_input().split(" "))
    edges = []
    for i in range(0, m):
        a, b = map(int, raw_input().split(" "))
        edges.append((a, b))
    graph = Graph(n, m, edges)
    print graph.toString()
Example #5
File: io.py Project: 2020saurav/DGA
def readInput():
    '''First line contains two arguments:
    n = number of vertices
    m = number of edges in the graph
    next m lines contain (a, b) representing an edge.'''
    n, m = map(int, raw_input().split(" "))
    edges = []
    for i in range(0, m):
        a, b = map(int, raw_input().split(" "))
        edges.append((a, b))
    graph = Graph(n, m, edges)
    print graph.toString()
Example #6
    def _graph_factory(self):
        graph = Graph(container_dict=dict(padding=5,
                                          bgcolor='lightgray'))

        graph.new_plot(
                       padding=[50, 5, 5, 50],
#                       title='{}'.format(self.title),
                       xtitle='CDD Operating Voltage (V)',
                       ytitle='Intensity (fA)',
                       )
        graph.new_series(type='scatter',
                         marker='pixel')
        return graph
Example #7
def test_two_disjoint_edges(concomp):
    msg = "{} failed".format(concomp.__name__)

    g = Graph()

    g.add_edge(0, 1)
    g.add_edge(2, 3)

    cc = concomp(g)

    assert len(cc) == 4
    assert cc[0] == cc[1]
    assert cc[2] == cc[3]
    assert cc[0] != cc[2]
Example #8
    def _graph_factory(self):
        g = Graph(window_title='Coincidence Scan',
                  container_dict=dict(padding=5, bgcolor='lightgray')
                  )
        g.new_plot(padding=[50, 5, 5, 50],
                   ytitle='Intensity (fA)',
                   xtitle='Operating Voltage (V)')

        for di in self.spectrometer.detectors:
            g.new_series(
                         name=di.name,
                         color=di.color)

        return g
Example #9
def testExtendSubgraph1():
    graph = Graph(5, 5, [(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)])
    extender = ExtendSubgraph(graph, 101, 3)
    task = Task([0, 1, 1, 0, 0], [0, 3], 0, 0)
    newTasks = extender.generateNewTasks(task)
    for task in newTasks:
        print task.vertices, task.edges
Example #10
    def generate_unbalanced_graph(self, nodes_in_cluster):
        nodes = []
        for i in range(0, self.clusters):
            nodes.extend([i] * nodes_in_cluster[i])
        random.shuffle(nodes)

        edges = np.zeros((self.n, self.n))

        random_pin = np.random.choice([0, 1],
                                      edges.shape,
                                      p=[1 - self.p_in, self.p_in])
        random_pout = np.random.choice([0, 1],
                                       edges.shape,
                                       p=[1 - self.p_out, self.p_out])

        for i in range(0, self.n):
            for j in range(0, self.n):
                is_same_cluster = nodes[i] == nodes[j]

                if is_same_cluster:
                    edges[i][j] = random_pin[i][j]
                else:
                    edges[i][j] = random_pout[i][j]

        return Graph(self.n, nodes, edges)
Example #11
def test_simple_chain(concomp, n):
    assert n > 1

    msg = "{} failed".format(concomp.__name__)

    g = Graph()

    for i in range(n):
        g.add_edge(i, i + 1)

    cc = concomp(g)

    assert len(cc) == n + 1

    for i in range(n):
        assert cc[i] == cc[i + 1], msg
Example #12
def testExtendSubgraph3():
    graph = Graph(3, 3, [(0, 1), (1, 2), (0, 2)])
    extender = ExtendSubgraph(graph, 101, 3)
    task = Task([1, 1, 1], [1], 0, 0)
    newTasks = extender.generateNewTasks(task)
    for task in newTasks:
        print task.vertices, task.edges
Example #13
    def _graph_factory(self):
        gc = self.graph_cnt
        cnt = '' if not gc else gc
        self.graph_cnt += 1
        name = self.parent.name if self.parent else 'Foo'

        g = Graph(window_title='{} Power Calibration {}'.format(name, cnt),
                               container_dict=dict(padding=5),
                               window_x=500 + gc * 25,
                               window_y=25 + gc * 25
                               )

        g.new_plot(
                   xtitle='Setpoint (%)',
                   ytitle='Measured Power (W)')
        g.new_series()
        return g
Example #14
def test_two_nonterm(algo):
    test_data_path = LOCAL_CFPQ_DATA.joinpath('two_nonterm')
    base_algo: BaseProblem = algo()
    graph = Graph.from_txt(test_data_path.joinpath('Graphs/graph_1.txt'))
    grammar = cfg_from_txt(test_data_path.joinpath('Grammars/g.cfg'))
    base_algo.prepare(graph, grammar)

    result: ResultAlgo = base_algo.solve()
    assert result.matrix_S.nvals == 156
Example #15
    def __init__(self, graph: Graph):
        self._n_vars: dict = {}
        self._e_vars: dict = {}
        self._k_var: IntVar
        self._model = cp_model.CpModel()
        self._graph: Graph = graph
        if graph.is_empty():
            return
        self._setup_problem()
Example #16
def benchmark_cyclic_graph(n: int):
    print()
    print('graph vertex count = ' + str(n))
    graph = Graph.generate_cyclic(n)

    time_start = time.time()
    VmtlProblem(graph).get_solution()
    elapsed = time.time() - time_start
    print('time: ' + str(round(elapsed, 3)) + 's')
    print('time: ' + str(round(elapsed * 1000, 2)) + 'ms')
Example #17
    def _graph_default(self):
        g = Graph(container_dict=dict(padding=5))
        g.new_plot(padding=5)
        g.set_axis_traits(axis='y', visible=False)
        g.set_axis_traits(axis='x', visible=False)
        g.set_grid_traits(grid='x', visible=False)
        g.set_grid_traits(grid='y', visible=False)
        return g
Example #18
def test_two_nonterm(algo):
    test_data_path = LOCAL_CFPQ_DATA.joinpath('two_nonterm')
    ms_algo: MultipleSourceProblem = algo()
    graph = Graph.from_txt(test_data_path.joinpath('Graphs/graph_1.txt'))
    grammar = cfg_from_txt(test_data_path.joinpath('Grammars/g.cfg'))
    ms_algo.prepare(graph, grammar)

    result: ResultAlgo
    result, with_cache = ms_algo.solve([1])
    assert result.matrix_S.nvals == 1 and with_cache.nvals == 1
Example #19
    def graph(poly, opoly, line):
        from src.graph.graph import Graph

        g = Graph()
        g.new_plot()

        for po in (poly, opoly):
            po = np.array(po)
            try:
                xs, ys = po.T
            except ValueError:
                xs, ys, _ = po.T
            xs = np.hstack((xs, xs[0]))
            ys = np.hstack((ys, ys[0]))
            g.new_series(xs, ys)

    #    for i, (p1, p2) in enumerate(lines):
    #        xi, yi = (p1[0], p2[0]), (p1[1], p2[1])
    #        g.new_series(xi, yi, color='black')
        return g
Example #20
def test_two_nonterm(algo):
    test_data_path = LOCAL_CFPQ_DATA.joinpath('two_nonterm')
    singlepath_algo: SinglePathProblem = algo()
    graph = Graph.from_txt(test_data_path.joinpath('Graphs/graph_1.txt'))
    grammar = cfg_from_txt(test_data_path.joinpath('Grammars/g.cfg'))
    singlepath_algo.prepare(graph, grammar)

    result: ResultAlgo = singlepath_algo.solve()
    assert result.matrix_S.nvals == 156

    paths = singlepath_algo.getPath(1, 1, "S")
    assert paths == 2
Example #21
def benchmark_ms(algo_name, data, result_dir):
    """
    Measurement function for finding paths from a set of vertices
    @param algo_name: concrete implementation of the algorithm
    @param data: dictionary in format {path to graph: list of paths to grammars}
    @param result_dir: directory for uploading results of measurement
    """
    header_index = ['graph', 'grammar', 'size_chunk', 'time', 'count_S']

    chunk_sizes = [1, 2, 4, 8, 16, 32, 50, 100, 500, 1000, 5000, 10000, None]

    for graph in data:
        result_index_file_path = result_dir.joinpath(
            f'{graph.stem}-{algo_name.__name__}-msindex')

        append_header = False
        if not exists(result_index_file_path):
            append_header = True

        result_csv = open(result_index_file_path, mode='a', newline='\n')
        csv_writer_index = csv.writer(result_csv,
                                      delimiter=',',
                                      quoting=csv.QUOTE_NONNUMERIC,
                                      escapechar=' ')

        if append_header:
            csv_writer_index.writerow(header_index)

        g = LabelGraph.from_txt(graph)
        for grammar in data[graph]:
            algo = algo_name()
            algo.prepare(Graph.from_txt(graph), cfg_from_txt(grammar))
            for chunk_size in chunk_sizes:
                chunks = []
                if chunk_size is None:
                    chunks = g.chunkify(g.matrices_size)
                else:
                    chunks = g.chunkify(chunk_size)

                for chunk in tqdm(chunks, desc=f'{graph.stem}-{grammar.stem}'):
                    algo.clear_src()  # Attention (TODO): remove this line if you want to cache the result!
                    start = time()
                    res = algo.solve(chunk)
                    finish = time()

                    csv_writer_index.writerow([
                        graph.stem, grammar.stem, chunk_size, finish - start,
                        res.matrix_S.nvals
                    ])
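Note: in benchmark_ms (and the other benchmark functions below), `data` maps each graph path to a list of grammar paths. A minimal sketch of building such a dictionary with pathlib, assuming the Graphs/Grammars layout shown in Example #39 (illustration only, not part of the project code):

from pathlib import Path

suite = Path('test/data/binary_tree')  # hypothetical dataset root
data = {
    graph: sorted((suite / 'Grammars').glob('*.cfg'))
    for graph in sorted((suite / 'Graphs').glob('*.txt'))
}
# data now has the form {path to graph: [paths to grammars]}, as the docstring describes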
Example #22
def test_two_nonterm(algo):
    test_data_path = LOCAL_CFPQ_DATA.joinpath('two_nonterm')
    allpath_algo: AllPathsProblem = algo()
    graph = Graph.from_txt(test_data_path.joinpath('Graphs/graph_1.txt'))
    grammar = cfg_from_txt(test_data_path.joinpath('Grammars/g.cfg'))
    allpath_algo.prepare(graph, grammar)

    result: ResultAlgo = allpath_algo.solve()
    assert result.matrix_S.nvals == 156

    allpath_algo.prepare_for_exctract_paths()
    paths = allpath_algo.getPaths(1, 1, "S", 3)
    assert len(paths) == 1
Example #23
def benchmark_index(algo_name, data, result_dir, rounds):
    """
    Measurement function for finding paths between all pairs of vertices
    @param algo_name: concrete implementation of the algorithm
    @param data: dictionary in format {path to graph: list of paths to grammars}
    @param result_dir: directory for uploading results of measurement
    @param rounds: number of measurement rounds
    @return: variance value for each round of measurements
    """
    header_index = ['graph', 'grammar', 'time', 'count_S', 'variance']

    variances = []
    for graph in data:
        result_index_file_path = result_dir.joinpath(
            f'{graph.stem}-{algo_name.__name__}-index')

        append_header = False
        if not exists(result_index_file_path):
            append_header = True
        result_csv = open(result_index_file_path, mode='a', newline='\n')
        csv_writer_index = csv.writer(result_csv,
                                      delimiter=',',
                                      quoting=csv.QUOTE_NONNUMERIC,
                                      escapechar=' ')

        if append_header:
            csv_writer_index.writerow(header_index)

        for grammar in data[graph]:
            algo = algo_name()
            algo.prepare(Graph.from_txt(graph), cfg_from_txt(grammar))
            count_S = 0
            times = []
            for _ in tqdm(range(rounds), desc=f'{graph.stem}-{grammar.stem}'):
                algo.prepare_for_solve()
                start = time()
                res = algo.solve()
                finish = time()
                times.append(finish - start)
                count_S = res.matrix_S.nvals

            sample_mean = get_sample_mean(times)
            variances.append(get_variance(times, sample_mean))
            csv_writer_index.writerow([
                graph.stem, grammar.stem, sample_mean, count_S,
                get_variance(times, sample_mean)
            ])

    return variances
Example #24
    def _execute_power_calibration_check(self):
        '''
        
        '''
        g = Graph()
        g.new_plot()
        g.new_series()
        g.new_series(x=[0, 100], y=[0, 100], line_style='dash')
        do_later(self._open_graph, graph=g)

        self._stop_signal = TEvent()
        callback = lambda pi, r: None
        self._iterate(self.check_parameters,
                      g, False, callback)
Example #25
    def _graph_default(self):
        g = Graph(container_dict=dict(padding=5, kind='h'))
        g.new_plot(xtitle='weight (mg)', ytitle='40Ar* (fA)',
                   padding=[60, 20, 60, 60]
#                   padding=60
                   )

        g.new_series()
        g.new_plot(xtitle='40Ar* (fA)', ytitle='%Error in Age',
                   padding=[30, 30, 60, 60]
                   )
        g.new_series()
#        fp = create_line_plot(([], []), color='red')
#        left, bottom = add_default_axes(fp)
#        bottom.visible = False
#        left.orientation = 'right'
#        left.axis_line_visible = False
#        bottom.axis_line_visible = False
#        left.visible = False

#        if self.kind == 'weight':
#            bottom.visible = True
#            bottom.orientation = 'top'
#            bottom.title = 'Error (ka)'
#            bottom.tick_color = 'red'
#            bottom.tick_label_color = 'red'
#            bottom.line_color = 'red'
#            bottom.title_color = 'red'
#        else:
#            left.title = 'Weight (mg)'
#        fp.visible = False
#        gd = GuideOverlay(fp, value=0.01, orientation='v')
#        fp.overlays.append(gd)
#        g.plots[0].add(fp)
#        self.secondary_plot = fp

        return g
Example #26
    def _get_k_range(graph: Graph) -> list:
        vertex_count = len(graph.nodes)
        edge_count = len(graph.edges)
        vertex_edge_count = vertex_count + edge_count

        left_side = VmtlProblem._binomial(vertex_edge_count + 1,
                                          2) + VmtlProblem._binomial(
                                              edge_count + 1, 2)
        right_side = 2 * VmtlProblem._binomial(vertex_edge_count + 1,
                                               2) + VmtlProblem._binomial(
                                                   vertex_count + 1, 2)

        max_edges = graph.max_edges()  # maximum number of edges connected to a node in the given graph
        k_min: int = sum(range(1, max_edges + 1))
        k_max: int = (max_edges + 1) * vertex_edge_count - k_min
        return [
            i for i in range(k_min, k_max + 1)
            if left_side <= i * vertex_count <= right_side
        ]
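A quick sanity check of the formula above, assuming `_binomial(n, 2)` is the binomial coefficient C(n, 2) and `max_edges()` returns the largest number of edges incident to any single node: for a 5-cycle, vertex_count = 5, edge_count = 5 and vertex_edge_count = 10, so left_side = C(11, 2) + C(6, 2) = 70, right_side = 2 * C(11, 2) + C(6, 2) = 125, k_min = 1 + 2 = 3 and k_max = 3 * 10 - 3 = 27; the filter 70 <= 5 * k <= 125 then narrows the candidates to k = 14..25.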
Example #27
def benchmark_single_path(algo_name, data, result_dir):
    """
    Measurement function for extracting a single path
    @param algo_name: concrete implementation of the algorithm
    @param data: dictionary in format {path to graph: list of paths to grammars}
    @param result_dir: directory for uploading results of measurement
    """
    header_paths = ['graph', 'grammar', 'len_path', 'time']

    for graph in data:
        result_paths_file_path = result_dir.joinpath(
            f'{graph.stem}-{algo_name.__name__}-singlepaths')

        append_header = False
        if not exists(result_paths_file_path):
            append_header = True

        result_csv = open(result_paths_file_path, mode='a', newline='\n')
        csv_writer_paths = csv.writer(result_csv,
                                      delimiter=',',
                                      quoting=csv.QUOTE_NONNUMERIC,
                                      escapechar=' ')

        if append_header:
            csv_writer_paths.writerow(header_paths)

        for grammar in data[graph]:
            algo = algo_name()
            algo.prepare(Graph.from_txt(graph), cfg_from_txt(grammar))
            res = algo.solve()
            for elem in tqdm(res.matrix_S,
                             desc=f'{graph.stem}-{grammar.stem}-paths'):
                start = time()
                paths = algo.getPath(elem[0], elem[1], "S")
                finish = time()
                csv_writer_paths.writerow(
                    [graph.stem, grammar.stem, paths, finish - start])
Example #28
    def _finish_calibration(self):
        super(FusionsCO2PowerCalibrationManager, self)._finish_calibration()
        g = Graph()
        g.new_plot()

        # plot W vs 8bit dac
        x = self.graph.get_data(axis=1)
        _, y = self.graph.get_aux_data()

        xf = self.graph.get_data(axis=1, series=2)
        _, yf = self.graph.get_aux_data(series=3)

#        print xf
#        print yf
        x, y = zip(*zip(x, y))
        xf, yf = zip(*zip(xf, yf))
        g.new_series(x, y)
        g.new_series(xf, yf)

        self._ipm_coeffs_w_v_r = self._regress(x, y, FITDEGREES['linear'])
        self._ipm_coeffs_w_v_r1 = self._regress(xf, yf, FITDEGREES['linear'])

        self._open_graph(graph=g)
Example #29
    def _gc(self, p, det, kind):
        g = Graph(container_dict=dict(padding=5),
                  window_width=1000,
                  window_height=800,
                  window_x=40,
                  window_y=20
                  )
        with open(p, 'r') as fp:
            # gather data
            reader = csv.reader(fp)
            header = reader.next()
            groups = self._parse_data(reader)
            '''
                groups= [data,]
                data shape = nrow,ncols
                
            '''
            data = groups[0]
            x = data[0]
            y = data[header.index(det)]

        sy = smooth(y, window_len=120)  # , window='flat')

        x = x[::50]
        y = y[::50]
        sy = sy[::50]

        # smooth

        # plot
        g.new_plot(zoom=True, xtitle='Time (s)', ytitle='{} Baseline Intensity (fA)'.format(det))
        g.new_series(x, y, type=kind, marker='dot', marker_size=2)
        g.new_series(x, sy, line_width=2)
#        g.set_x_limits(500, 500 + 60 * 30)
#        g.edit_traits()
        return g
Example #30
    def setup(self):
        self.g = Graph()
Example #31
class Controller(ControllerBase):
    __view = None
    __file_view = None
    __database_view = None
    __graph_view = None
    __current_list = []
    __serial_file = None
    __query_creator = None
    __EMPID = 0

    def __init__(self, view):
        self.__view = view
        self.__file_view = FileReader()
        self.__database_view = DatabaseView()
        self.__graph_view = Graph()
        self.__query_creator = QueryCreator()
        self.__logger = Logger()
        with open('src/config.json') as json_data_file:
            data = json.load(json_data_file)
            self.__serial_file = data['pickle']['file']
            self.__database_name = data['mysql']['db']

    def load_file(self, path):
        data = self.__file_view.get_file_data(path)

        if data == FileNotFoundError:
            self.__view.output(data)
        else:
            self.__validate_contents(data)

    def __validate_contents(self, content):
        employee = Employee()
        self.__current_list = []
        for i, x in enumerate(content):
            result = employee.add_list(x)
            if 'fields' in result:
                self.__current_list.append(result['fields'])
            else:
                x = result['tags']
                self.__view.output('{} {}'.format(i, ' '.join(x)))

    def __to_lists(self, list_of_employees):
        self.__current_list = []
        for emp in list_of_employees:
            self.__current_list.append(emp.to_list())

    def pickle(self, args):
        if len(args) > 1:
            self.__view.output("Too many parameters")
        elif len(self.__current_list) == 0:
            self.__view.output("No data to pickle")
        elif len(args) == 1:
            Serial.pickle_this(args[0], self.__current_list)
        else:
            print('x')
            Serial.pickle_this(self.__serial_file, self.__current_list)
            self.__current_list = []

    def unpickle(self, args):
        """
        Gets the data previously stored in a pickle.
        Places this in __current_list
        :args: overwrite or append
        """
        if len(args) > 1:
            self.__view.output('Unpickle accepts one argument [overwrite or append]')
            return
        elif len(args) == 1:
            self.__current_list = Serial.unpickle_this(args[0])
        else:
            self.__current_list = Serial.unpickle_this(self.__serial_file)

    def display(self, args):
        if len(args) > 0:
            self.__view.output('display accepts no parameters')
        elif len(self.__current_list) > 0:
            for row in self.__current_list:
                self.__view.output(row)
        else:
            self.__view.output('No data to display')

    def save_to_database(self, args):
        if len(args) > 0:
            self.__view.output('save_to_database accepts no parameters')
        elif not self.__current_list:
            self.__view.output('No data to save')
        else:
            self.__database_view.save_data_to_new(self.__database_name, self.__current_list)

    def chart_pie(self, title, data, label=None):
        sql = ''
        if label is not None:
            sql = self.__query_creator.get_pie_data_sum(data, label)
        else:
            sql = self.__query_creator.get_pie_data_count(data)
        new_list = self.__get_chart_data(sql)
        data_x = 1  # list numbers
        label_x = 0
        self.__graph_view.plot_pie(title, new_list[label_x], new_list[data_x])

    def chart_bar(self, title, x, y, top):
        sql = self.__query_creator.get_bar_data(x, y, top)
        new_list = self.__get_chart_data(sql)
        x_pos = 0
        y_pos = 1
        self.__graph_view.plot_bar(title, x, y, new_list[x_pos], new_list[y_pos])

    def __get_chart_data(self, sql):
        from_db = self.__database_view.get_input(self.__database_name, sql)
        return self.__fix_list(from_db)

    @staticmethod
    def __fix_list(from_db):
        print(from_db)
        new_list = [[], []]

        for i in from_db:
            new_list[0].append(i[0])
            new_list[1].append(i[1])

        return new_list
Example #32
import os
import re
from pathlib import Path

from flask import Flask, render_template, send_from_directory, request, Response, flash, redirect, url_for
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.wsgi import WSGIContainer

from src.graph.graph import Graph
from src.graph.vmtl_problem import VmtlProblem

app = Flask(__name__)
app.secret_key = b'blablabla'
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0

graph = Graph()
Path("data").mkdir(parents=True, exist_ok=True)
graph_file_path: str = 'data/graph.json'


def dir_last_updated(folder):
    return str(max(os.path.getmtime(os.path.join(root_path, f))
                   for root_path, dirs, files in os.walk(folder)
                   for f in files))


@app.route("/")
def backend_index():
    global graph, graph_file_path

    graph.save_as_json(graph_file_path, id_as_label=True)
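The Tornado imports above (HTTPServer, IOLoop, WSGIContainer) suggest the Flask app is served through Tornado rather than the built-in dev server; a minimal sketch of that standard pattern, with the port chosen here as an assumption:

if __name__ == '__main__':
    # wrap the Flask WSGI app in Tornado's container and serve it
    http_server = HTTPServer(WSGIContainer(app))
    http_server.listen(5000)  # port is an assumption, not taken from the original snippet
    IOLoop.current().start()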
Example #33
def backend_clear_graph():
    global graph, graph_file_path

    graph = Graph()
    graph.save_as_json(graph_file_path)
    return render_template("backend-graph-editor.html", last_updated=dir_last_updated('data'))
Example #34
def frontend_clear_graph():
    global graph

    graph = Graph()
    return Response(status=200)
Example #35
def testInitalTaskGeneration1():
    graph = Graph(5, 5, [(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)])
    initalTasks = genInitalTasks(graph, 101, 5)
    for task in initalTasks:
        print task.vertices, task.edges
Example #36
    poly = np.array(poly)
    poly *= 1000

#    xs, ys = poly.T
#    cx, cy = xs.mean(), ys.mean()
#    poly = rotate_poly(poly.T, 45, loc=(cx, cy))
#    poly = poly.T

    use_convex_hull = False
    npoints, lens = raster(poly,
                           step=750,
                           offset=-500,
                           use_convex_hull=use_convex_hull,
                           find_min=True)

    from src.graph.graph import Graph
    g = Graph(window_height=700)
    g.plotcontainer.padding = 5
    g.new_plot(padding=[60, 30, 30, 50],
               bounds=[400, 400],
               resizable='h',
               xtitle='X (microns)',
               ytitle='Y (microns)')

    if use_convex_hull:
        poly = convex_hull(poly)
        xs, ys = poly.T
        cx, cy = xs.mean(), ys.mean()
        P = poly.T
        xs = np.hstack((xs, xs[0]))
        ys = np.hstack((ys, ys[0]))
    else:
Example #37
from src.graph.graph import Graph
from src.graph.graph import stringToGraph

assert stringToGraph("3$011101110").toString() \
    == Graph(3, 3, [(0, 1), (1, 2), (0, 2)]).toString()
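# Note (an inference, not stated in the source): the encoded string appears to be
# "<vertex count>$<row-major adjacency matrix>"; for the triangle on 3 vertices the
# rows 011, 101, 110 concatenate to 011101110, which is why the two sides agree.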
Example #38
    def _calculate_mean_ratio(self, s40, s36):

        r = [i40 / i36 for i40, i36 in zip(s40, s36)]

        v = [vi.nominal_value for vi in r]
        errs = [vi.std_dev for vi in r]

        m, e = calculate_weighted_mean(v, errs)

        return m, e


if __name__ == '__main__':
    from numpy import linspace, polyfit, polyval
    d = DeadTime()
    g = Graph()

    g.new_plot(padding=[30, 10, 20, 40])
    g.new_plot(padding=[30, 10, 20, 40], show_legend=True)
    nshots = d.read_csv()
#    taus = range(5, 40, 5)
    taus = linspace(0, 30, 41)
    rratios1 = []
    mswds1 = []

    for i in KEYS:
        s40 = nshots[i + '40']
        s36 = nshots[i + '36']
        m1, _ = d._calculate_mean_ratio(s40, s36)
        print 'uncorrected ratio {} = {:0.2f} '.format(i, m1)
Example #39
from src.problems.Base.algo.matrix_base.matrix_base import MatrixBaseAlgo
from src.problems.MultipleSource.algo.matrix_ms.matrix_ms import MatrixMSBruteAlgo, MatrixMSOptAlgo
from src.problems.MultipleSource.algo.tensor_ms.tensor_ms import TensorMSAlgo
from src.problems.AllPaths.algo.tensor.tensor import TensorSimpleAlgo, TensorDynamicAlgo
from src.problems.SinglePath.algo.matrix_single_path.matrix_single_path_index import MatrixSingleAlgo

from src.graph.graph import Graph
from cfpq_data import cfg_from_txt

from src.problems.utils import ResultAlgo

from pathlib import Path

CASE = Path("test/data/binary_tree/")

graph = Graph.from_txt(CASE.joinpath("Graphs/graph_1.txt"))
grammar = cfg_from_txt(CASE.joinpath("Grammars/g.cfg"))
algo = MatrixBaseAlgo()
algo.prepare(graph, grammar)
res: ResultAlgo = algo.solve()
print(f'MatrixBaseAlgo: {res.matrix_S.nvals}')

graph = Graph.from_txt(CASE.joinpath("Graphs/graph_1.txt"))
grammar = cfg_from_txt(CASE.joinpath("Grammars/g.cfg"))
algo = TensorSimpleAlgo()
algo.prepare(graph, grammar)
res: ResultAlgo = algo.solve()
print(f'TensorSimpleAlgo: {res.matrix_S.nvals}')

graph = Graph.from_txt(CASE.joinpath("Graphs/graph_1.txt"))
grammar = cfg_from_txt(CASE.joinpath("Grammars/g.cfg"))
Example #40
class TestGraph:
    def setup(self):
        self.g = Graph()

    def test_add_edge(self):
        self.g.add_edge(0, 1)

        for i in range(2):
            assert len(self.g[i]) == 1, len(self.g[i])

    def test_add_edge_loop(self):
        for i in range(10):
            self.g.add_edge(i, i + 1)

        for i in range(1, 9):
            assert len(self.g[i]) == 2, len(self.g[i])

        for i in [0, 10]:
            assert len(self.g[i]) == 1, len(self.g[i])

    def test_del_edge(self):
        self.g.add_edge(0, 1)
        for i in [0, 1]:
            assert len(self.g[i]) == 1, len(self.g[i])

        self.g.del_edge(0, 1)
        for i in [0, 1]:
            assert len(self.g[i]) == 0, len(self.g[i])

    def test_del_edge_nonexistent(self):
        self.g.del_edge(0, 1)
        assert True

    def test_del_edge_multiple(self):
        for i in range(3):
            self.g.add_edge(0, 1)

        self.g.del_edge(0, 1)

        for i in range(2):
            assert len(self.g[i]) == 0, len(self.g[i])
Example #41
    def _graph_default(self):
        g = Graph(container_dict=dict(padding=5, stack_order='top_to_bottom'))
        g.width = self.record.item_width * 0.73
        return g
Example #42
File: run.py Project: 2020saurav/DGA
from config.servers import servers
from config.networkParams import MESSAGE_DELIMITER
from src.graph.graph import Graph  # needed for Graph(n, m, edges) below; import path as in Example #37
import src.util.network as network

def readInput():
    '''First line contains two arguments:
    n = number of vertices
    m = number of edges in the graph
    next m lines contain (a, b) representing an edge.'''
    n, m = map(int, raw_input().split(" "))
    edges = []
    for i in range(0, m):
        a, b = map(int, raw_input().split(" "))
        edges.append((a, b))
    return n, m, edges

def findMasterIpPort():
    for s in servers:
        if s.role == 'master':
            return s.IP, s.port
    #master not found
    assert False

if __name__ == '__main__':
    n, m, edges = readInput()
    graph = Graph(n, m, edges)
    MasterIP, MasterPort = findMasterIpPort()
    network.sendToIP(MasterIP, MasterPort, "INPUT____________" + MESSAGE_DELIMITER + graph.toString())
    # TODO Wait for computation to end
    # merge all output file if required
Example #43
    def _graph_factory(self, with_image=False):
        g = Graph(container_dict=dict(padding=0))
        g.new_plot(
                   bounds=[250, 250],
                   resizable='',
                   padding=[30, 0, 0, 30])

        cx = self.cx
        cy = self.cy
        cbx = self.xbounds
        cby = self.ybounds
        tr = self.target_radius

#        if with_image:
#            px = self.pxpermm  #px is in mm
#            cbx, cby = self._get_crop_bounds()
#            #g.set_axis_traits(tick_label_formatter=lambda x: '{:0.2f}'.format((x - w / 2) / px))
#            #g.set_axis_traits(tick_label_formatter=lambda x: '{:0.2f}'.format((x - h / 2) / px), axis='y')
#
#            bx, by = g.plots[0].bounds
#            g.plots[0].x_axis.mapper = LinearMapper(high_pos=bx,
#                                                    range=DataRange1D(low_setting=self.xbounds[0],
#                                                                      high_setting=self.xbounds[1]))
#            g.plots[0].y_axis.mapper = LinearMapper(high_pos=by,
#                                                    range=DataRange1D(low_setting=self.ybounds[0],
#                                                                      high_setting=self.ybounds[1]))
#            cx += self.image_width / 2
#            cy += self.image_height / 2
#            tr *= px

        g.set_x_limits(*cbx)
        g.set_y_limits(*cby)

        lp, _plot = g.new_series()
        t = TargetOverlay(component=lp,
                          cx=cx,
                          cy=cy,
                          target_radius=tr)

        lp.overlays.append(t)
        overlap_overlay = OverlapOverlay(component=lp,
                                         visible=self.show_overlap)
        lp.overlays.append(overlap_overlay)

        g.new_series(type='scatter', marker='circle')
        g.new_series(type='line', color='red')
        return g
Example #44
def main():
    graph: Graph = Graph.load_from_json()
    print(graph)
Example #45
def testInitalTaskGeneration2():
    graph = Graph(3, 3, [(0, 1), (1, 2), (0, 2)])
    initalTasks = genInitalTasks(graph, 101, 5)
    for task in initalTasks:
        print task.vertices, task.edges