Example #1
def get_dag(num_cells):
    dag = OrderedDict()
    operations = list(model._OPERATIONS.keys())
    num_operations = len(operations)
    for i in range(1, num_cells + 1):
        name = 'node_%d' % i
        if i == 1 or i == 2:
            node = model.Node(name, None, None, None, None)
        else:
            p_node_1 = 'node_%d' % random.randint(1, i - 1)
            p_node_2 = 'node_%d' % random.randint(1, i - 1)
            op1 = operations[random.randint(0, num_operations - 1)]
            op2 = operations[random.randint(0, num_operations - 1)]
            node = model.Node(name, p_node_1, p_node_2, op1, op2)
        dag[name] = node
    return dag
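
A minimal usage sketch follows, with a hypothetical stand-in for the `model` module; the real `Node` signature and `_OPERATIONS` contents are assumptions made for illustration:

import random
from collections import OrderedDict, namedtuple
from types import SimpleNamespace

# Hypothetical stand-in for the `model` module used by get_dag.
model = SimpleNamespace(
    Node=namedtuple('Node', ['name', 'prev1', 'prev2', 'op1', 'op2']),
    _OPERATIONS={'identity': None, 'sep_conv 3x3': None, 'avg_pool 3x3': None},
)

dag = get_dag(num_cells=5)  # nodes 1 and 2 are inputs, the rest are random
for name, node in dag.items():
    print(name, node)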
Example #2
def generateGraph(n):
    graph = m.Graph()
    for i in range(n):
        node = m.Node(i, r.randint(0, maxX), r.randint(0, maxY),
                      r.randint(0, maxD))
        graph.addNode(node)
    graph.countDistances()
    return graph
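
`m`, `r`, `maxX`, `maxY` and `maxD` are module-level names from the surrounding file. A self-contained sketch with assumed stand-ins (the `Graph`/`Node` API is inferred from the calls above, not confirmed by the source):

import random as r
from types import SimpleNamespace

# Assumed bounds for coordinates and node weight; values are illustrative.
maxX, maxY, maxD = 100, 100, 10

class Node:
    def __init__(self, ident, x, y, d):
        self.ident, self.x, self.y, self.d = ident, x, y, d

class Graph:
    def __init__(self):
        self.nodes = []
    def addNode(self, node):
        self.nodes.append(node)
    def countDistances(self):
        pass  # pairwise distance computation omitted in this sketch

m = SimpleNamespace(Graph=Graph, Node=Node)

graph = generateGraph(10)
print(len(graph.nodes))  # -> 10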
Example #3
def get_nodes(points: list) -> list:
    """Get nodes from points (neutral)."""
    nodes = []
    for key, point in enumerate(points):
        nodes.append(
            model.Node(key=key, text=key, pos=point.to_dict(),
                       color='#FFFFFF'))
    return nodes
Example #4
 def setRules(self, node):
     model_name = f'table_{node._name}'
     if model_name in self.main._master.table_rules:  # is there a model for the table node?
         self.ruleModel = getattr(self.main._master,
                                  model_name)  # retrieve it
     else:
         self.main._master.table_rules.append(model_name)
         self.ruleModel = md.Model(self.main._master._database, model_name)
         self.model_name = model_name
         setattr(self.main._master, model_name, self.ruleModel)
         if '_rows' in node._data and '_columns' in node._data:
             rows = node._data['_rows'].split(', ')
             for column in node._data['_columns'].split(', '):
                 column_node = md.Node(column, {})
                 self.ruleModel.addChild(column_node, 'Cell')
                 for row in rows:
                     row_node = md.Node(row, {})
                     parent = self.ruleModel.index(column_node._row,
                                                   column_node._column)
                     self.ruleModel.addChild(row_node, 'Cell', parent)
     self.ui.treeView.setModel(
         self.ruleModel)  # set the model for the rules
Example #5
 def _read_nodes(self, graph) -> dict:
     """Считать узлы графа"""
     nodes = dict()
     for section in graph:
         if str(section.tag) == 'section':
             if section.attrib['name'] == 'node':
                 node_id = section.findall("*[@key='id']")[0].text
                 text = section.findall("*[@key='label']")[0].text
                 graphics = section.findall("*[@name='graphics']")[0]
                 coordinates = self._get_coordinates(graphics)
                 color = graphics.findall("*[@key='fill']")[0].text.upper()
                 nodes[node_id] = model.Node(node_id, text, coordinates,
                                             color)
     return nodes
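
The `findall` queries imply an XML layout of nested `section` elements carrying `key`/`name` attributes (a GML file converted to XML, by the look of it). A minimal sketch of the assumed shape, parsed with `xml.etree.ElementTree`; the exact tag names are inferred from the queries, not confirmed by the source:

import xml.etree.ElementTree as ET

# Inferred structure: each node is a <section name="node"> holding
# <attribute key="..."> children and a nested <section name="graphics">.
SAMPLE = """
<section name="graph">
  <section name="node">
    <attribute key="id">1</attribute>
    <attribute key="label">L</attribute>
    <section name="graphics">
      <attribute key="x">3.0</attribute>
      <attribute key="y">5.0</attribute>
      <attribute key="fill">#ffffff</attribute>
    </section>
  </section>
</section>
"""

graph = ET.fromstring(SAMPLE)
node = graph.findall("*[@name='node']")[0]
print(node.findall("*[@key='id']")[0].text)  # -> 1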
Example #6
	def test_node(self):
		n1 = model.Node([12,13])
		n2 = model.Node([21,23])
		n3 = model.Node([31,32])
		
		# Connect three nodes together in a chain
		n1.connect(12, n2, 21)
		n2.connect(23, n3, 32)
		
		# Check both connections exist and work both ways
		self.assertEqual(n1.connections[12], n2)
		self.assertEqual(n2.connections[21], n1)
		self.assertEqual(n2.connections[23], n3)
		self.assertEqual(n3.connections[32], n2)
		
		# Disconnect the connections and check this occurred
		n2.disconnect(21)
		n3.disconnect(32)
		self.assertIsNone(n1.connections[12])
		self.assertIsNone(n1.connections[13])
		self.assertIsNone(n2.connections[21])
		self.assertIsNone(n2.connections[23])
		self.assertIsNone(n3.connections[31])
		self.assertIsNone(n3.connections[32])
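
A minimal `Node` sketch that would satisfy this test, assuming the constructor takes a list of port identifiers and that links are maintained symmetrically (an illustration, not the actual `model` implementation):

class Node:
    def __init__(self, ports):
        # One slot per port; None means unconnected.
        self.connections = {port: None for port in ports}

    def connect(self, port, other, other_port):
        # Wire both directions so each side sees the peer node.
        self.connections[port] = other
        other.connections[other_port] = self

    def disconnect(self, port):
        # Clear the reverse link first, then the local one.
        other = self.connections[port]
        if other is not None:
            for p, n in other.connections.items():
                if n is self:
                    other.connections[p] = None
        self.connections[port] = None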
Example #7
def main():

    model.setup_all()
    comp = os.getenv('COMPUTERNAME')
    print comp

    newNode = model.Node.get_by(Name=unicode(comp))
    if newNode is None:
        print "hello"
        newNode = model.Node(Name=unicode(comp))
        model.saveData()
        print newNode
    app = QtGui.QApplication(sys.argv)
    window = RN_window(comp, newNode)
    window.show()
    # It's exec_ because exec is a reserved word in Python
    sys.exit(app.exec_())
Example #8
 def __init__(self):
     QtGui.QMainWindow.__init__(self)
     #print "yeehhaa"
     #self.computerName = computerName
     #self.Node = Node
     self.timer = QtCore.QTimer()
     self.timer.timeout.connect(self.start)
     self.timer.start(8000)
     self.labelText = "waiting...."
     #self.app = app
     self.ui = LYPA_renderNodeUI.Ui_MainWindow()
     self.ui.setupUi(self)
     model.setup_all()
     comp = os.getenv('COMPUTERNAME')
     self.newNode = model.Node.get_by(Name=unicode(comp))
     if self.newNode is None:
         self.newNode = model.Node(Name=unicode(comp))
         model.saveData()
     self.start()
Example #9
    def take_inventory(self, scanner=None):
        if scanner is None:
            scanner = self.do_ping_scan

        model.Session.begin()
        i = model.Inventory()
        i.starttime = datetime.datetime.now()
        model.Session.add(i)

        num = 0
        for ip in scanner():
            n = model.Node()
            n.ip = ip
            n.inventory = i
            model.Session.add(n)
            num += 1

        i.numup = num
        i.endtime = datetime.datetime.now()

        model.Session.add(i)
        model.Session.commit()
        return i
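
Accepting `scanner` as a parameter is a small dependency-injection hook: tests can pass a stub instead of the real ping scan. A hedged usage sketch, where `inv_service` is a hypothetical instance of the class above:

# Illustrative only: inject a fake scanner instead of do_ping_scan.
fake_ips = ['10.0.0.1', '10.0.0.2', '10.0.0.7']
inventory = inv_service.take_inventory(scanner=lambda: iter(fake_ips))
print(inventory.numup)  # -> 3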
Example #10
 def addAction(self, name, obj_type):
     node = md.Node(name, {})
     node.copies = []
     self.main._master.addChild(node, obj_type, '_rule',
                                self.getSelection())
Example #11
    """Заглушка генератора миссий"""

    def __init__(self, config: configs.Config):
        super().__init__(config)
        self.generations = []

    def make_mission(self, mission_template: str, file_name: str, tvd_name: str):
        self.generations.append((file_name, tvd_name))

    def make_ldb(self, tvd_name: str):
        pass


TEST = 'test'
TEST_NODES_LIST = [
    model.Node(key='1',  text='L', pos={'x': 3, 'z': 5}, color=COLOR_WHITE),
    model.Node(key='2',  text='L', pos={'x': 10, 'z': 1}, color=COLOR_WHITE),
    model.Node(key='3',  text='L', pos={'x': 16, 'z': 1}, color=COLOR_WHITE),
    model.Node(key='4',  text='L', pos={'x': 23, 'z': 1}, color=COLOR_WHITE),
    model.Node(key='5',  text='L', pos={'x': 25, 'z': 13}, color=COLOR_WHITE),
    model.Node(key='6',  text='L', pos={'x': 22, 'z': 18}, color=COLOR_WHITE),
    model.Node(key='7',  text='L', pos={'x': 24, 'z': 29}, color=COLOR_WHITE),
    model.Node(key='8',  text='L', pos={'x': 21, 'z': 32}, color=COLOR_WHITE),
    model.Node(key='9',  text='L', pos={'x': 11, 'z': 33}, color=COLOR_WHITE),
    model.Node(key='10', text='L', pos={'x': 0, 'z': 28}, color=COLOR_WHITE),
    model.Node(key='11', text='L', pos={'x': 2, 'z': 16}, color=COLOR_WHITE),
    model.Node(key='12', text='28', pos={'x': 7, 'z': 5}, color=COLOR_BLUE),
    model.Node(key='13', text='44', pos={'x': 14, 'z': 3}, color=COLOR_BLUE),
    model.Node(key='14', text='43', pos={'x': 20, 'z': 2}, color=COLOR_BLUE),
    model.Node(key='15', text='42', pos={'x': 22, 'z': 4}, color=COLOR_BLUE),
    model.Node(key='16', text='46', pos={'x': 21, 'z': 14}, color=COLOR_BLUE),
]
Example #12
def submit(job, pipe=sys.stdout.write):
    """
    Submit job.

    Parameters
    ----------
    job: front2back.BackendJob
        The job to be submitted.
    pipe: function
        The function used to pipe progress messages. When called from the
        user interface, messages are piped to the message window by default.
    """

    pipe(' \n\n')
    pipe(' Job {}\n'.format(job.getName()))
    pipe(' -------------------------------\n')
    pipe('   Submitted \n')
    pipe('   {}\n'.format(tm.ctime()))

    #  Read job definition

    jobName = job.getName()
    jobModel = job.getModel()

    jobThickness = job.getThickness()
    jobDamage = job.getDamage()

    jobMaterial = job.getMaterial()
    jobBoundary1, jobBoundary2, jobBoundary3 = job.getBoundaries()
    jobWastage = job.getCorrosion()
    jobTemperature = job.getTemperature()

    jobAnalysis = job.getAnalysis()

    if jobAnalysis == 'Modal':
        modes, normalization = job.getModalSettings().values()
    else:
        alpha, beta, period, increment, lcase = job.getTimeHistorySettings(
        ).values()

    #  Define element labels of damaged areas

    damage_map = {
        0: [],                                       # 'Healthy state'
        1: [49 * 6],                                 # 'Damaged state 1'
        2: [49 * 6, 49 * 6 + 1],                     # 'Damaged state 2'
        3: [49 * 6, 49 * 6 + 1, 49 * 6 + 2],         # 'Damaged state 3'
        4: [100 * 6 + 5],                            # 'Damaged state 4'
        5: [100 * 6 + 5, 100 * 6 + 4],               # 'Damaged state 5'
        6: [100 * 6 + 5, 100 * 6 + 4, 100 * 6 + 3],  # 'Damaged state 6'
    }
    damagedElements = damage_map[jobModel]

    #  Define Geometry

    length = 25  # Dimension in x-axis
    density = 2000  # Material density

    height_start = 0.60  # Dimension in y-axis
    height_end = height_start

    width_start = 0.1  # Dimension in z-axis
    width_end = width_start

    nel_x = 200  # Number of elements in x-axis
    nel_y = 6  # Number of elements in y-axis

    el_size_x = length / nel_x  # Element size in x-direction
    el_size_y = height_start / nel_y  # Element size in y-direction

    points_x = np.arange(0, length * (1 + 1 / nel_x) - 1e-10, length / nel_x)
    counter = it.count(0)

    points_y = []
    nodes = []
    indices = []

    #  Define model nodes

    for i, x in enumerate(points_x):

        h = height_start - x / length * (height_start - height_end)
        points_y.append(
            np.arange(-h / 2, h * (1 / 2 + 1 / nel_y) - 1e-10, h / nel_y))

        for y in points_y[i]:
            nodes.append(model.Node([x, y, 0]))
            nodes[-1].SetValue('adof', ['x', 'y'])
            label = next(counter)

            if x < length - 1e-10 and y < h / 2 - 1e-10:
                indices.append(label)

    #  Define model elements

    elements = []
    etype = quadrilaterals.Quad4()
    irule = quadrature.Gauss.inQuadrilateral(rule=2).info

    for i, j in enumerate(indices):

        #  Define element nodes
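        #  Nodes were generated column-by-column (outer loop over x, inner
        #  loop over y), so node j + nel_y + 1 sits one column to the right
        #  of node j and node j + 1 sits directly above it; enodes lists the
        #  quad corners counter-clockwise.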

        enodes = [
            nodes[j], nodes[j + nel_y + 1], nodes[j + nel_y + 2], nodes[j + 1]
        ]

        # Coordinates of element center-point

        xc = np.sum([node.coords[0] for node in enodes]) / len(enodes)
        yc = np.sum([node.coords[1] for node in enodes]) / len(enodes)

        # Coordinates of element integration points

        xi = xc + irule[:, 0] * el_size_x
        yi = yc + irule[:, 1] * el_size_y

        #  Interpolate temperature at gauss points

        temperature = np.interp(xi, jobTemperature[:, 1] * length,
                                jobTemperature[:, 0])

        #  Calculate stiffness reduction

        reduction = jobDamage if i in damagedElements else 0

        #  Define material properties for each integration point

        E = np.interp(xi, jobMaterial[:, 2],
                      jobMaterial[:, 0]) * (1 - reduction)
        n = np.interp(xi, jobMaterial[:, 2], jobMaterial[:, 1])
        materials = []

        for k in range(len(xi)):
            materials.append(material.LinearElastic(E[k], n[k], density))

        #  Interpolate thickness at gauss points

        wastage = np.interp(xi, jobWastage[:, 1] * length, jobWastage[:, 0])
        thickness = jobThickness * (np.ones(len(xi)) - wastage)

        #  Define elements and modify damaged ones

        elements.append(
            model.Element(enodes, etype, materials, thickness, irule))

    #  Initialize model

    model1 = model.Model(nodes, elements)

    # Interpolate temperature at boundary locations

    temp1 = np.interp(0, jobTemperature[:, 1] * length, jobTemperature[:, 0])
    temp2 = np.interp(length / 2, jobTemperature[:, 1] * length,
                      jobTemperature[:, 0])
    temp3 = np.interp(length, jobTemperature[:, 1] * length,
                      jobTemperature[:, 0])

    # Interpolate boundary values at temperature value

    kx1 = np.interp(temp1, jobBoundary1[:, 2], jobBoundary1[:, 0])
    ky1 = np.interp(temp1, jobBoundary1[:, 2], jobBoundary1[:, 1])

    kx2 = np.interp(temp2, jobBoundary2[:, 2], jobBoundary2[:, 0])
    ky2 = np.interp(temp2, jobBoundary2[:, 2], jobBoundary2[:, 1])

    kx3 = np.interp(temp3, jobBoundary3[:, 2], jobBoundary3[:, 0])
    ky3 = np.interp(temp3, jobBoundary3[:, 2], jobBoundary3[:, 1])

    #  Apply boundary conditions

    dtol = 1e-5
    blabels = []  # Labels of boundary nodes

    #  Support locations: the four leftmost, five mid-span and four rightmost
    #  bottom-edge nodes, paired with the corresponding spring constants.

    supports = [
        ([el_size_x * k for k in range(4)], kx1, ky1),
        ([length / 2 + el_size_x * k for k in range(-2, 3)], kx2, ky2),
        ([length - el_size_x * k for k in range(4)], kx3, ky3),
    ]

    for node in nodes:
        x, y = node.coords[0], node.coords[1]

        if np.abs(y + height_start / 2) >= dtol:
            continue  # only bottom-edge nodes carry supports

        for positions, kx, ky in supports:
            if any(np.abs(x - p) < dtol for p in positions):
                blabels.append(node.label)
                model1.constraints.addSpring(node.label, ['x', 'y'], [kx, ky])
                break

    #  Extract degrees of freedom for output locations.

    columns = np.arange(5, nel_x + 5, 10)[np.newaxis].T * (nel_y + 1)
    olabels = np.tile(np.array([1, 3, 5]), len(columns)).reshape(
        (len(columns), 3))
    olabels = (columns + olabels).reshape((olabels.size, ))
    odofs = np.sort(np.hstack((olabels * 2, olabels * 2 + 1)))
    ocoords = np.array([(nodes[j].coords[0], nodes[j].coords[1])
                        for j in olabels])

    # Save labels and coordinates of nodes where response quantities are
    # extracted.

    output = np.vstack((olabels, ocoords.T)).T
    labels, frmt = 'label  x  y', ['%d', '%10.5f', '%10.5f']
    np.savetxt('Output_nodes.dat', output, fmt=frmt, header=labels)

    # Save labels of measurement degrees of freedom

    xlabels = np.array([str(item) + 'x' for item in olabels])
    ylabels = np.array([str(item) + 'y' for item in olabels])
    labels = np.vstack((xlabels, ylabels)).T.flatten()
    labels = '   '.join(label for label in labels)

    #  Run analysis

    if jobAnalysis == 'Modal':

        # Submit modal analysis

        pipe('   \n')
        pipe('   Started: analysis \n')

        modal = analysis.Modal(model1)
        modal.setNumberOfEigenvalues(modes)
        modal.setNormalizationMethod(normalization)
        modal.submit()

        pipe('   Completed: analysis \n\n')

        # Extract mode shapes at output locations

        frequencies = modal.frequencies
        modes = modal.modes[odofs, :]

        #  Save results

        pipe('   Started: writing output \n')

        np.savetxt(jobName + '_frequencies.dat', frequencies)
        np.savetxt(jobName + '_modes.dat', modes, header=labels)

        pipe('   Completed: writing output \n\n')

        # pipe('  Saved output files \n')

    elif jobAnalysis == 'Time history':

        model1.setDampingCoefficients(alpha, beta)

        # Select load case

        if lcase == 0:
            nlabels = np.arange(nel_y + 1, (nel_x + 1) * (nel_y + 1),
                                nel_y + 1)
            lcase = np.loadtxt('Load_case_1.dat', skiprows=1)
            velocity, load = lcase[0], lcase[1]

            for j, nlabel in enumerate(nlabels):

                t1 = nodes[nlabels[j - 1]].coords[0] / velocity
                t2 = nodes[nlabels[j]].coords[0] / velocity

                if j == 0:
                    t1 = t2 - 1e-5

                if j == nlabels.shape[0] - 1:
                    t3 = t2 + 1e-5
                else:
                    t3 = nodes[nlabels[j + 1]].coords[0] / velocity

                time = np.array([t1, t2, t3])
                force = np.array([0, 1e3 * load, 0])
                amplitude = [np.array([time, force])]

                model.Load(model1).addForce(nodes[nlabel].label, 'y',
                                            amplitude)

        elif lcase == 1:
            nlabel = 63 * (nel_y + 1) - 1

            lcase = np.loadtxt('Load_case_2.dat', skiprows=1)
            time, force = lcase[:, 0], lcase[:, 1]
            amplitude = [np.array([time, force])]

            model.Load(model1).addForce(nodes[nlabel].label, 'y', amplitude)

        elif lcase == 2:
            nlabel = 139 * (nel_y + 1) - 1

            lcase = np.loadtxt('Load_case_3.dat', skiprows=1)
            time, force = lcase[:, 0], lcase[:, 1]
            amplitude = [np.array([time, force])]

            model.Load(model1).addForce(nodes[nlabel].label, 'y', amplitude)

        elif lcase == 3:
            lcase = np.loadtxt('Load_case_4.dat', skiprows=1)
            time, forces = lcase[:, 0], lcase[:, 1:]
            nlabels = np.arange(nel_y + 1, (nel_x + 1) * (nel_y + 1),
                                nel_y + 1)

            for j, nlabel in enumerate(nlabels):

                amplitude = [np.array([time, forces[:, j]])]
                model.Load(model1).addForce(nodes[nlabel].label, 'y',
                                            amplitude)

        # Define dynamic analysis

        pipe('  \n')
        pipe('   Started: analysis \n')

        dynamics = analysis.Dynamics(model1)
        dynamics.setTimePeriod(period)
        dynamics.setIncrementSize(increment)
        dynamics.submit()

        pipe('   Completed: analysis \n\n')

        time = np.arange(0, period + increment, increment)
        nmodes = dynamics.displacement.shape[0]

        displacement = np.zeros((nmodes, time.size))
        acceleration = np.zeros((nmodes, time.size))

        for m in range(nmodes):

            displacement[m, :] = np.interp(time, dynamics.time,
                                           dynamics.displacement[m, :])
            acceleration[m, :] = np.interp(time, dynamics.time,
                                           dynamics.acceleration[m, :])

        # Extract displacements and accelerations at output degrees of freedom

        displacements = dynamics.modes[odofs, :].dot(displacement).T
        accelerations = dynamics.modes[odofs, :].dot(acceleration).T

        # Extract strains at output degrees of freedom

        strains = np.zeros((time.size, len(olabels), 3))  # (steps, nodes, Exx/Eyy/Exy)
        rcoords = [[1, 1], [1, -1], [-1, -1], [-1, 1]]

        strain_history = np.zeros((time.size, 3))

        for k, olabel in enumerate(olabels):
            elabels = np.sort(nodes[olabel].links)

            for elabel, (r1, r2) in zip(elabels, rcoords):

                ncoords = elements[elabel].getNodeCoordinates()
                ipoints = elements[elabel].getIntegrationPoints()

                edofs = elements[elabel].getNodeDegreesOfFreedom()
                disp = dynamics.modes[edofs, :].dot(displacement)  # .T
                element = elements[elabel].getType()

                # 1. rows of disp contain element displacements
                # 2. columns of disp should contain time steps

                strain = element.getStrain(ncoords, disp, ipoints, r1, r2).T

                # 1. columns of strain contain components Exx, Eyy, Exy
                # 2. rows of strain contain time steps

                strain_history += strain

            strains[:, k, :] = strain_history / len(nodes[olabel].links)
            strain_history[:] = 0

        strains = strains.reshape((time.size, len(olabels) * 3))

        # Save results (displacements, accelerations and strains)

        pipe('   Started: writing output \n')

        labels = ''.join([
            'Node-{}-Ux'.format(label).ljust(24, ' ') +
            'Node-{}-Uy'.format(label).ljust(24, ' ') for label in olabels
        ])
        fname = jobName + '_displacements.dat'
        np.savetxt(fname, displacements, fmt='% .16e', header=labels)

        labels = ''.join([
            'Node-{}-Ax'.format(label).ljust(24, ' ') +
            'Node-{}-Ay'.format(label).ljust(24, ' ') for label in olabels
        ])
        fname = jobName + '_accelerations.dat'
        np.savetxt(fname, accelerations, fmt='% .16e', header=labels)

        labels = ''.join([
            'Node-{}-Exx'.format(label).ljust(24, ' ') +
            'Node-{}-Eyy'.format(label).ljust(24, ' ') +
            'Node-{}-Exy'.format(label).ljust(24, ' ') for label in olabels
        ])
        fname = jobName + '_strains.dat'
        np.savetxt(fname, strains, fmt='% .16e', header=labels)

        pipe('   Completed: writing output \n\n')

    elif jobAnalysis == 'Static':

        nlabel = 63 * (nel_y + 1) - 1

        # lcase = np.loadtxt('Load_case_2.dat', skiprows=1)
        # time, force = lcase[0, 0], lcase[1, 1]
        time = 30  # np.linspace(0, 30, 10000)
        force = 1e3
        amplitude = [np.array([time, force])]
        model.Load(model1).addForce(nodes[nlabel].label, 'y', amplitude)

        # Define static analysis

        pipe('  \n')
        pipe('   Started: analysis \n')

        static = analysis.Static(model1)
        static.submit()

        pipe('   Completed: analysis \n\n')

        # Extract displacements at output degrees of freedom

        displacements = static.displacement[odofs]  # [np.newaxis]

        # Extract strains at output nodes

        strains = np.zeros((len(olabels), 3))
        rcoords = [[1, 1], [1, -1], [-1, -1], [-1, 1]]

        for k, olabel in enumerate(olabels):
            elabels = np.sort(nodes[olabel].links)

            for elabel, (r1, r2) in zip(elabels, rcoords):
                ncoords = elements[elabel].getNodeCoordinates()
                ipoints = elements[elabel].getIntegrationPoints()

                edofs = elements[elabel].getNodeDegreesOfFreedom()
                disp = static.displacement[edofs]
                element = elements[elabel].getType()

                strain = element.getStrain(ncoords, disp, ipoints, r1, r2)
                nodes[olabel].strain += strain

            strains[k, :] = nodes[olabel].strain

        strains = strains.reshape((1, strains.size))

        # Save results

        labels = ''.join([
            'Node-{}-Ux'.format(label).ljust(24, ' ') +
            'Node-{}-Uy'.format(label).ljust(24, ' ') for label in olabels
        ])

        pipe('   Started: writing output\n')

        np.savetxt(jobName + '_displacements.dat',
                   displacements,
                   header=labels)

        labels = ''.join([
            'Node-{}-Exx'.format(label).ljust(24, ' ') +
            'Node-{}-Eyy'.format(label).ljust(24, ' ') +
            'Node-{}-Exy'.format(label).ljust(24, ' ') for label in olabels
        ])

        np.savetxt(jobName + '_strains.dat',
                   strains,
                   fmt='% .16e',
                   header=labels)

        pipe('   Completed: writing output\n')

    pipe('   Completed \n')
    pipe('   {}\n'.format(tm.ctime()))
Example #13
def main(unused_argv):
    # Using the Winograd non-fused algorithms provides a small performance boost.
    os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'

    if FLAGS.random_dag:
        conv_dag = get_dag(FLAGS.num_cells)
        reduc_dag = get_dag(FLAGS.num_cells)
    else:
        conv_dag = OrderedDict()
        conv_dag['node_1'] = model.Node('node_1', None, None, None, None)
        conv_dag['node_2'] = model.Node('node_2', None, None, None, None)
        conv_dag['node_3'] = model.Node('node_3', 'node_2', 'node_2',
                                        'sep_conv 3x3', 'identity')
        conv_dag['node_4'] = model.Node('node_4', 'node_2', 'node_1',
                                        'sep_conv 5x5', 'identity')
        conv_dag['node_5'] = model.Node('node_5', 'node_1', 'node_2',
                                        'avg_pool 3x3', 'sep_conv 3x3')
        conv_dag['node_6'] = model.Node('node_6', 'node_1', 'node_2',
                                        'sep_conv 3x3', 'avg_pool 3x3')
        conv_dag['node_7'] = model.Node('node_7', 'node_2', 'node_1',
                                        'sep_conv 5x5', 'avg_pool 3x3')
        reduc_dag = OrderedDict()
        reduc_dag['node_1'] = model.Node('node_1', None, None, None, None)
        reduc_dag['node_2'] = model.Node('node_2', None, None, None, None)
        reduc_dag['node_3'] = model.Node('node_3', 'node_1', 'node_2',
                                         'sep_conv 5x5', 'avg_pool 3x3')
        reduc_dag['node_4'] = model.Node('node_4', 'node_2', 'node_2',
                                         'sep_conv 3x3', 'avg_pool 3x3')
        reduc_dag['node_5'] = model.Node('node_5', 'node_2', 'node_2',
                                         'avg_pool 3x3', 'sep_conv 3x3')
        reduc_dag['node_6'] = model.Node('node_6', 'node_5', 'node_2',
                                         'sep_conv 5x5', 'avg_pool 3x3')
        reduc_dag['node_7'] = model.Node('node_7', 'node_6', 'node_1',
                                         'sep_conv 3x3', 'sep_conv 5x5')

    with open(os.path.join(FLAGS.model_dir, 'model_dag.json'), 'w') as f:
        dag = OrderedDict()
        dag['conv_dag'] = conv_dag
        dag['reduc_dag'] = reduc_dag
        json.dump(dag, f)

    # Set up a RunConfig to only save checkpoints once per training cycle.
    run_config = tf.estimator.RunConfig().replace(save_checkpoints_secs=1e9)
    cifar_classifier = tf.estimator.Estimator(
        model_fn=cifar10_model_fn,
        model_dir=FLAGS.model_dir,
        config=run_config,
        params={
            'num_blocks': FLAGS.num_blocks,
            'num_cells': FLAGS.num_cells,
            'num_nodes': FLAGS.num_nodes,
            'num_classes': _NUM_CLASSES,
            'filters': FLAGS.filters,
            'conv_dag': conv_dag,
            'reduc_dag': reduc_dag,
            'data_format': FLAGS.data_format,
            'batch_size': FLAGS.batch_size,
            'T_0': FLAGS.T_0,
            'T_mul': FLAGS.T_mul,
            'lr_max': FLAGS.lr_max,
            'lr_min': FLAGS.lr_min,
        })

    for _ in range(FLAGS.train_epochs // FLAGS.epochs_per_eval):
        tensors_to_log = {
            'learning_rate': 'learning_rate',
            'cross_entropy': 'cross_entropy',
            'train_accuracy': 'train_accuracy'
        }

        logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                                  every_n_iter=100)

        cifar_classifier.train(input_fn=lambda: input_fn(
            True, FLAGS.data_dir, FLAGS.batch_size, FLAGS.epochs_per_eval),
                               hooks=[logging_hook])

        # Evaluate the model and print results
        eval_results = cifar_classifier.evaluate(
            input_fn=lambda: input_fn(False, FLAGS.data_dir, FLAGS.batch_size))
        print(eval_results)
Example #14
                return
            else:
                pass
        if jobStat == len(task.job.Tasks):
            task.job.Status = 100
            model.saveData()
        else:
            task.job.Status = int(
                (100.0 / float(len(task.job.Tasks))) * float(jobStat))
            model.saveData()
        jobStat = 0

    def refreshMe(self):
        self.ui.label.setText(self.labelText)


if __name__ == "__main__":
    model.setup_all()
    comp = os.getenv('COMPUTERNAME')
    print comp
    newNode = model.Node.get_by(Name=unicode(comp))
    if newNode is None:
        print "hello"
        newNode = model.Node(Name=unicode(comp))
        model.saveData()
        print newNode
    app = QtGui.QApplication(sys.argv)
    window = RN_window(comp, newNode)
    window.show()
    # It's exec_ because exec is a reserved word in Python
    sys.exit(app.exec_())
Example #15
 def addAction(self, name, obj_type):
     selected = self.getSelection()
     self._master.addChild(md.Node(name, {}), obj_type, self.currentTree(),
                           selected)
Example #16
def build_graph(data_dir):
    graph = model.Graph()
    tm = time.time()

    #initialize header fields.
    header = Header()
    while True:
        try:
            line = raw_input()
        except EOFError:
            store(header, graph, data_dir)
            break

        #handle header
        if (line.split(trace.header_delimiter)[0] == trace.header_indicator):
            #log progress
            t = time.time()
            sys.stderr.write("%s,%s,%s\n" % (line, t, t - tm))
            tm = t
            #output
            if (not header.is_empty):
                store(header, graph, data_dir)
                graph = model.Graph()
            #update header fields
            header.update_from_header_line(line)
            continue

        #handle warts line
        line_dict = parse_line(line)
        dst_ip = line_dict["dst_ip"]
        hops = line_dict["hops"]
        #print hops #debug

        #handle traceroute hops.
        hop_list = hops.split(trace.hop_delimiter)
        #print hop_list #debug
        i = 0
        while (i <= len(hop_list) - 1):  #ignore preceding blanks.
            hop_dict = parse_hop(hop_list[i])
            if "ip" in hop_dict:
                break
            i += 1
        #add node
        hop_dict = parse_hop(hop_list[i])
        ip = hop_dict["ip"]
        rtt_i = hop_dict["rtt"]
        node_i = model.Node(ip)
        ind_i = graph.add_node(node_i)
        while i <= len(hop_list) - 1:
            j = i + 1
            is_direct = True
            while (j < len(hop_list)):
                hop_dict = parse_hop(hop_list[j])
                if "ip" in hop_dict:
                    break
                is_direct = False
                j += 1
            #print "i:%s, j:%s" % (i, j) #debug

            #print "ip:%s, ind_i:%s" % (ip, ind_i) #debug
            if i == len(hop_list) - 1 and dst_ip == ip:  #check if hop is host.
                #print "is not router" #debug
                graph.nodes[ind_i].is_router = False

            if j < len(hop_list):
                hop_dict = parse_hop(hop_list[j])
                ip = hop_dict["ip"]
                rtt_j = hop_dict["rtt"]
                node_j = model.Node(ip)
                ind_j = graph.add_node(node_j)
                #print "ip:%s, ind_i:%s" % (ip, ind_i) #debug
                if j == len(hop_list) - 1 and dst_ip == ip:  #check if hop is host.
                    #print "is not router" #debug
                    graph.nodes[ind_j].is_router = False

                if not is_direct:
                    blank = (ind_i, j - i - 1, ind_j)
                    node_blank = model.Node(blank)
                    node_blank.is_blank = True
                    ind_blank = graph.add_node(node_blank)

                #add edge
                delay = rtt_j - rtt_i
                edge = model.Edge(ind_i, ind_j, delay)
                edge.is_direct = is_direct
                graph.add_edge(edge)

            if j < len(hop_list):
                rtt_i = rtt_j
                ind_i = ind_j
            i = j
Example #17
 def addRule(self, name, original_id, original_node):
     selected = self.getSelection()
     node = md.Node(name, {'_original': original_id})
     node.original = original_node
     original_node.copies.append(node)
     self.main._master.addChild(node, 'Rule', self.model_name, selected)