Code Example #1
File: linearwrap.py Project: quanlzheng/tensorpack
 def layer_func(*args, **kwargs):
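     # if the first positional argument is a string, treat it as the layer name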
     if len(args) and isinstance(args[0], six.string_types):
         name, args = args[0], args[1:]
         ret = layer(name, self._t, *args, **kwargs)
     else:
         ret = layer(self._t, *args, **kwargs)
     return LinearWrap(ret)
Code Example #2
 def draw(self):
     g = 50  # pixels per meter
     end = tools.fieldr(self.charge, 0.1 * self.E1) * g
     l = (2 * end)
     self.top = layer(l)
     self.side = layer(l)
     h = 0.1
     while h < 1:
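         # step h from 0.1 toward 1.0 in increments of 0.1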
         r = tools.fieldr(self.charge, h * self.E1) * g
         print "At ", r, " meters away E=", h * self.E1
         x = floor(sqrt(0.5 * (r**2)))
         while x >= self.top.s2:
             if r < self.transition:
                 sidey = tools.gety(x, r, self.top.s2 - (self.realSize / 2))
                 if self.thing.square and x < self.top.s2 + (self.realSize /
                                                             2):
                     topy = self.top.s2 - (self.realSize / 2) - r
                 else:
                     topy = sidey
             else:
                 sidey = tools.gety(x, r, self.top.s2)
                 topy = sidey
             self.top.mirror8(x, topy, h * self.E1)
             self.side.mirror8(x, sidey, h * self.E1)
             x -= 1
         h += 0.1
     if self.infinite:
         q = self.top.space[:, self.top.s2]
         i = 0
         while i < self.side.s1:
             self.side.space[:, i] = q
             i += 1
Code Example #3
 def f(*args, **kwargs):
     if len(args) and isinstance(args[0], six.string_types):
         name, args = args[0], args[1:]
         ret = layer(name, self._t, *args, **kwargs)
     else:
         ret = layer(self._t, *args, **kwargs)
     return LinearWrap(ret)
Code Example #4
 def draw(self):
     self.thing.show()
     g = 50  # pixels per meter
     top = layer(500)
     r = 1
     i = 1
     while r <= 250:
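         # each ring's radius grows by roughly a factor of sqrt(10) per pass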
         x = top.s2 - r
         while x < top.s2:
             y = tools.gety(x, r, top.s2)
             top.mirror8(x, y, 255)
             x += 1
         r = floor(r * sqrt(10))
         i = i * 0.1
     top.show()
     q = top.space[:, top.s2]
     if self.infinite:
         i = 0
         side = layer(500)
         while i < side.s1:
             side.space[:, i] = q
             i += 1
     else:
         side = top
     side.show()
Code Example #5
 def layer_func(*args, **kwargs):
     if len(args) and isinstance(args[0], six.string_types):
         name, args = args[0], args[1:]
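         # prefix the supplied layer name with this block's own name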
         ret = layer(self._name + "_" + name, self._t, *args,
                     **kwargs)
     else:
         ret = layer(self._t, *args, **kwargs)
     return Sequential(self._name, ret)
Code Example #6
File: model.py Project: superliuwanjia/build_a_bug
    def get_lrrmn_output(self, ):
        inp = self.convnet_out
        for layer_no in range(self.lrcn_layers):
            with tf.variable_scope("lrrmn" + str(layer_no)):
                drmmnet = layer(inp, inp, inp, 1, [[1, 1, 1, 1]],
                                [[1, 1, self.convnet_out_sh[-1], 1]],
                                ["VALID"])
                drmmnet.EBottomUp()
                drmm_out = drmmnet.get_output()
                drmm_out_shape = drmm_out.get_shape().as_list()
                drmm_out = tf.reshape(drmm_out,
                                      [self.inp_size[0], self.inp_size[1], -1])

                hidden_units = drmm_out_shape[1] * drmm_out_shape[
                    2] * drmm_out_shape[3]
                lrcn_cell = LRCNcell(hidden_units, 1, 2, str(layer_no))

                # dynamic_rnn expects [batch, time, features]; assume the reshaped
                # drmm_out was intended here (conv_out is undefined in this scope)
                inp, state = tf.nn.dynamic_rnn(lrcn_cell,
                                               drmm_out,
                                               dtype=tf.float32)
                inp = tf.reshape(inp, drmm_out_shape)
                # h = tf.split(state,2)[0]

        # self.output = tf.reshape(inp,[self.inp_size[0], self.inp_size[1], -1])
        self.output = tf.split(state, 2)[0]
        self.state = state
Code Example #7
 def forward(self, x):
     y = [None] * self.n
     y[0] = self.get_layer(0)(x)
     for j in range(1, self.n):
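         # gather this node's inputs from its predecessors; free an output once its last consumer has run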
         x = []
         for i in self.in_links[j]:
             x.append(y[i])
             if j == self.out_links[i][-1]:
                 y[i] = None
         if not x:
             y[j] = None
         else:
             layer = self.get_layer(j)
             if isinstance(layer.base, models.Concat):
                 y[j] = layer(x)
             else:
                 x = sum(x)
                 y[j] = layer(x)
     return y[-1]
Code Example #8
File: isonode.py Project: linmengsysu/HetLGN
 def isonode(self, g, types, features):
     nfeatures = self.flinears(features, types)
     hs = []
     sim = []
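     # run every layer, collecting its output h_l and similarity sim_l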
     for layer in self.layers:
         h_l, sim_l = layer(g, types, nfeatures)
         hs.append(h_l)
         sim.append(sim_l)  #[B, sub/64]
     hs = torch.cat(hs, dim=-1)  # [B, n_layer, out]
     # hc = self.context_graph_embedding(nfeatures)
     sim = torch.cat(sim, dim=-1)
     pickle.dump(sim, open('./data/log_sim.pkl', 'wb'))
     print('only sim final embedding size={}'.format(sim.size()))
     return hs  #torch.mean(hs, dim=1)
Code Example #9
File: architecture.py Project: zhangsiyu1103/ESNAC
 def forward(self, x):
     y = [None] * self.n
     y[0] = self.get_layer(0)(x)
     for j in range(1, self.n):
         #print(j)
         x = []
         for i in self.in_links[j]:
             x.append(y[i])
             if j == self.out_links[i][-1]:
                 y[i] = None
         if not x:
             y[j] = None
         else:
             layer = self.get_layer(j)
             if isinstance(layer.base, models.Concat):
                 y[j] = layer(x)
             else:
                 x = sum(x)
                 #print("rep", layer.rep)
                 #print("previous output: ", y)
                 y[j] = layer(x)
                 #print(y[j].size())
     return y[-1]
Code Example #10
File: stock.py Project: blckOS/blckOS
    def __init__(self,width=112,height=128,x=0,y=0,color1=(33,33,33),
        color2=(10,10,10),title="",blur=0,radius=.2):
        
        super().__init__(width=width,height=height,x=x,y=y,color1 = color1,radius = radius)
        self.title = title

        stockData = getStockData()
        apple = stockData["AAPL"]
        google = stockData["GOOG"]

        appTitle = label(color=(255,255,255),text="Stocks",fontSize=10,
            x=35, y=8, appWidth=112, appHeight=128, centered=False,
            strong=True)

        appleTitle = label(color=(153,153,153),text="AAPL",fontSize=10,
            x=11, y=35, appWidth=112, appHeight=128, centered=False,
            strong=False) 

        applePrice = label(color=(255,255,255),text=apple,fontSize=12,
            x=11, y=51, appWidth=112, appHeight=128, centered=False,
            strong=True)

        box1 = layer(width=98,height=43,x=7,y=27,color1=(255,255,255,25),
            radius=0.2,isSublayer=True) 

        googleTitle = label(color=(153,153,153), text="GOOG", fontSize=10,
            x=11, y=82, appWidth=112, appHeight=128, centered=False,
            strong=False)

        googlePrice = label(color=(255,255,255), text=google, fontSize=12,
            x=11, y=97, appWidth=112, appHeight=128, centered=False,
            strong=True) 
        
        box2 = layer(width=98,height=43,x=7,y=74,color1=(255,255,255,25),
            radius=0.2,isSublayer=True) 

        self.subLayerList=[appTitle,appleTitle,applePrice,box1,googleTitle,googlePrice,box2]
Code Example #11
    def __call__(self, inputs):
        """Forward propagation. \n

            inputs    network inputs: np.ndarray of [float]"""
        inputs = np.array(inputs)
        outputs = inputs
        for layer in self.layers:
            outputs = layer(outputs)

        if self.__type == "regressor":
            return outputs
        elif self.__type == "classifier":
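            # apply a softmax so the class outputs sum to 1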
            exp_sum = sum([exp(output) for output in outputs])
            outputs = [exp(output) / exp_sum for output in outputs]
            return outputs
        else:
            raise Exception()
Code Example #12
    def build(self):
        """ Wrapper for _build() """

        self._build()

        # Build sequential layer model
        # Feed the values from the previous layer to the next layer
        self.activations.append(self.inputs)
        for layer in self.layers:
            hidden = layer(self.activations[-1])
            self.activations.append(hidden)
        self.outputs = self.activations[-1]

        # Store model variables for easy access
        variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        # self.vars = {var.name: var for var in variables}
        print("variables =",
              tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))

        # Build metrics
        self._loss()
        self._accuracy()

        # matrix1 = variables[0]
        # matrix2 = variables[1]

        # attempt at stop_gradients which does not retain graph connections
        # masked_matrix1 = entry_stop_gradients_column(matrix1, tf.expand_dims(self.weight_mask,0))
        # masked_matrix2 = entry_stop_gradients_row(matrix2, tf.expand_dims(self.weight_mask, 1))

        self.opt_op2 = self.optimizer.compute_gradients(self.loss,
                                                        variables)[1]
        # self.opt_op2 = self.optimizer.compute_gradients(self.loss, ga)[0][0]
        # self.deneme = self.optimizer.compute_gradients(self.loss, variables[0])[0][0]
        # print(self.opt_op2)
        # print(self.deneme)
        # self.opt_op2 = tf.equal(matrix2, masked_matrix2)

        self.opt_op = self.optimizer.minimize(self.loss)
Code Example #13
 def layer_func(name, *args, **kwargs):
     if self._t is not None:
         ret = layer(self._name + "_" + name, self._t, *args,
                     **kwargs)
         return Sequential(self._name, ret)
Code Example #14
 def f(name, *args, **kwargs):
     ret = layer(name, self._t, *args, **kwargs)
     return LinearWrap(ret)
Code Example #15
    def __init__(self,
                 width=112,
                 height=128,
                 x=0,
                 y=0,
                 color1=(33, 33, 33),
                 color2=(10, 10, 10),
                 title="",
                 blur=0,
                 radius=.2):

        super().__init__(width=width,
                         height=height,
                         x=x,
                         y=y,
                         color1=color1,
                         radius=radius)
        self.title = title

        stockData = getStockData()
        apple = stockData["AAPL"]
        google = stockData["GOOG"]

        appTitle = label(color=(255, 255, 255),
                         text="Stocks",
                         fontSize=10,
                         x=35,
                         y=8,
                         appWidth=112,
                         appHeight=128,
                         centered=False,
                         strong=True)

        appleTitle = label(color=(153, 153, 153),
                           text="AAPL",
                           fontSize=10,
                           x=11,
                           y=35,
                           appWidth=112,
                           appHeight=128,
                           centered=False,
                           strong=False)

        applePrice = label(color=(255, 255, 255),
                           text=apple,
                           fontSize=12,
                           x=11,
                           y=51,
                           appWidth=112,
                           appHeight=128,
                           centered=False,
                           strong=True)

        box1 = layer(width=98,
                     height=43,
                     x=7,
                     y=27,
                     color1=(255, 255, 255, 25),
                     radius=0.2,
                     isSublayer=True)

        googleTitle = label(color=(153, 153, 153),
                            text="GOOG",
                            fontSize=10,
                            x=11,
                            y=82,
                            appWidth=112,
                            appHeight=128,
                            centered=False,
                            strong=False)

        googlePrice = label(color=(255, 255, 255),
                            text=google,
                            fontSize=12,
                            x=11,
                            y=97,
                            appWidth=112,
                            appHeight=128,
                            centered=False,
                            strong=True)

        box2 = layer(width=98,
                     height=43,
                     x=7,
                     y=74,
                     color1=(255, 255, 255, 25),
                     radius=0.2,
                     isSublayer=True)

        self.subLayerList = [
            appTitle, appleTitle, applePrice, box1, googleTitle, googlePrice,
            box2
        ]
Code Example #16
 def add_layer(self, layer_size, a_type, drp=0.0):
     self.layer.append(layer(self.last_layer_size, layer_size, a_type, drp))
     self.layers = self.layers + 1
     self.last_layer_size = layer_size
Code Example #17
File: linearwrap.py Project: quanlzheng/tensorpack
 def layer_func(name, *args, **kwargs):
     ret = layer(name, self._t, *args, **kwargs)
     return LinearWrap(ret)
Code Example #18
 def addLayer(self, num_neuron,input_size):
     self.layers.append(layer(num_neuron,input_size))
Code Example #19
    def train(self):
        #sigmoid needs the [-1,1] range, not the [0,1]
        self.xAll = normalize(self.xAll)

        #mlp.labels=np.matrix(y).T

        #initialize the network
        hidden_layer1 = layer()
        hidden_layer2 = layer()
        output_layer = layer()

        train_samples = self.xAll.shape[0]
        hidden_layer1.inputDim = self.xAll.shape[1]
        hidden_layer1.neurons = 10
        hidden_layer1.activation = self.props

        hidden_layer2.inputDim = hidden_layer1.neurons
        hidden_layer2.neurons = 5
        hidden_layer2.activation = self.props

        output_layer.inputDim = hidden_layer2.neurons
        output_layer.neurons = self.labels.shape[1]
        output_layer.activation = self.props

        batch_id = 0
        begin = 0
        end = 0
        #training
        for i in range(self.max_itration):
            if end >= train_samples:
                #batch_id=begin%train_samples
                batch_id = 0
            else:
                batch_id = batch_id + 1
            begin = batch_id * self.batch
            end = (batch_id + 1) * self.batch
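            # slice the next mini-batch of inputs and labels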
            x = self.xAll[begin:end, :]
            y = self.labels[begin:end, :]
            print(train_samples)
            print(end)
            hidden_layer1.input = x
            hidden_layer1.samples = x.shape[0]
            hidden_layer1_output = hidden_layer1.forward()

            hidden_layer2.input = hidden_layer1_output
            hidden_layer2.samples = x.shape[0]
            hidden_layer2_output = hidden_layer2.forward()

            output_layer.input = hidden_layer2_output
            output_layer.samples = x.shape[0]
            output_layer_output = output_layer.forward()

            error = y - output_layer_output
            mse = self.props.errorFunc(error)
            print('mse ' + str(mse) + ' iteration ' + str(i + 1))
            self.errorList.append(mse)
            if mse <= self.error_goal:
                self.iteration = i + 1
                break
            output_layer.error = error

            output_layer.layer_output = output_layer_output
            L2_error = output_layer.backword()

            hidden_layer2.layer_output = hidden_layer2_output
            hidden_layer2.error = L2_error
            L1_error = hidden_layer2.backword()

            hidden_layer1.layer_output = hidden_layer1_output
            hidden_layer1.error = L1_error
            hidden_layer1.backword()

        self.hidden_layer1 = hidden_layer1
        self.hidden_layer2 = hidden_layer2
        self.output_layer = output_layer