Example No. 1
import sys
import numpy
import theano
from theano import tensor

def test_bug_2009_06_02_trac_387():
    y = tensor.lvector('y')
    f = theano.function([y],
            tensor.int_div(
                tensor.DimShuffle(y[0].broadcastable, ['x'])(y[0]), 2))
    sys.stdout.flush()
    print(f(numpy.ones(1, dtype='int64') * 3))
Example No. 2
def test_bug_2009_06_02_trac_387():
    y = tensor.lvector('y')
    f = theano.function([y],
                        tensor.int_div(
                            tensor.DimShuffle(y[0].broadcastable, ['x'])(y[0]),
                            2))
    print(f(numpy.ones(1, dtype='int64') * 3))
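
In both versions above, the DimShuffle(['x']) call lifts the 0-d scalar y[0] back into a 1-d tensor so the compiled function returns a vector, and tensor.int_div performs elementwise integer (floor) division. A minimal sketch of int_div on its own, without the DimShuffle detour (assumes a working Theano install; names are illustrative):

import numpy
import theano
from theano import tensor

y = tensor.lvector('y')
f = theano.function([y], tensor.int_div(y, 2))   # elementwise integer division
print(f(numpy.array([3, 4, 5], dtype='int64')))  # -> [1 2 2]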
Example No. 3
def test_bug_2009_06_02_trac_387():
    y = tensor.lvector("y")
    f = theano.function([y],
                        tensor.int_div(
                            tensor.DimShuffle(y[0].broadcastable, ["x"])(y[0]),
                            2))
    print(f(np.ones(1, dtype="int64") * 3))
Example No. 4
def input_row_from_variables(ori_ip, dest_ip, ori_lat, ori_long, dest_lat,
                             dest_long, ori_type, dest_type, dist):
    '''Create an input row for the MLP from the inputs'''

    input_row = tensor.zeros([input_size])

    offset = 0

    ips = [ori_ip, dest_ip]
    for ip in ips:
        for _ in range(4):
            # one-hot encode each of the four bytes of the IP address
            input_row = add_one_shot(input_row, offset, tensor.mod(ip, 256))
            ip = tensor.int_div(ip, 256)
            offset += 256

    for lat_, long_ in [(ori_lat, ori_long), (dest_lat, dest_long)]:
        translated_lat = tensor.iround((coordinate_size - 1) * (lat_ / 180 + 0.5))
        input_row = add_thermo(input_row, offset, translated_lat)
        offset += coordinate_size

        translated_long = tensor.iround((coordinate_size - 1) * (long_ / 360 + 0.5))
        input_row = add_thermo(input_row, offset, translated_long)
        offset += coordinate_size

    for type_ in [ori_type, dest_type]:
        # the result must be assigned back, otherwise the encoding is lost
        input_row = add_one_shot(input_row, offset, type_ + 1)
        offset += type_size

    translated_dist = tensor.iround(
        (dest_size - 1) * tensor.minimum(1, dist / max_earth_distance))
    input_row = add_thermo(input_row, offset, translated_dist)

    # could be useful if we want to add something
    offset += dest_size

    return input_row
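
The helpers add_one_shot and add_thermo are defined elsewhere in this project and are not shown here. A minimal sketch of what they plausibly look like, inferred only from the call sites above; these signatures are an assumption, not the project's actual code:

def add_one_shot(input_row, offset, index):
    # hypothetical: set a single 1 at position offset + index (one-hot)
    return tensor.set_subtensor(input_row[offset + index], 1)

def add_thermo(input_row, offset, value):
    # hypothetical: set 1s on the prefix offset .. offset + value (thermometer)
    return tensor.set_subtensor(input_row[offset:offset + value + 1], 1)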
Example No. 5
def test_bug_2009_06_02_trac_387():
    y = tensor.lvector('y')
    #f = theano.function([y], tensor.stack(y[0] / 2))
    #f = theano.function([y], tensor.join(0,tensor.shape_padleft(y[0] / 2,1)))
    f = theano.function([y], tensor.int_div(tensor.DimShuffle(y[0].broadcastable, ['x'])(y[0]), 2))
    sys.stdout.flush()
    print(f(numpy.ones(1, dtype='int64') * 3))
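
The two commented-out attempts and the final DimShuffle call all aim at the same thing: lifting the 0-d scalar y[0] into a 1-d vector before the division. A sketch of the more common spelling with tensor.shape_padleft, which is equivalent here:

import numpy
import theano
from theano import tensor

y = tensor.lvector('y')
padded = tensor.shape_padleft(y[0])            # 0-d scalar -> 1-d vector
f = theano.function([y], tensor.int_div(padded, 2))
print(f(numpy.ones(1, dtype='int64') * 3))     # -> [1]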
Example No. 6
def define_loss_generator(net, generated_img, true_image, loss_type='log'):
    assert loss_type in ('log', 'sqr')
    # run the generated and true images through the discriminator in one batch
    input = T.concatenate([generated_img, true_image], axis=0)
    output = lasagne.layers.get_output(net['out'], inputs=input)
    batch_size = T.int_div(input.shape[0], 2)  # symbolic half of the stacked batch
    if loss_type == 'sqr':
        # least-squares GAN generator loss
        loss = (T.sqr(output[:batch_size] - 1)).mean()
    else:
        # log loss; eps (defined elsewhere) keeps the log numerically stable
        loss = -T.log(output[:batch_size] + eps).mean()

    return loss
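
T.int_div is used here because input.shape[0] is symbolic at graph-construction time, so the split point between the generated and true halves must also be a symbolic integer. A standalone sketch of the same idiom (shapes are illustrative):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
half = T.int_div(x.shape[0], 2)  # integer division of a symbolic shape
f = theano.function([x], x[:half].shape)
print(f(np.zeros((6, 3), dtype=theano.config.floatX)))  # -> [3 3]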
Example No. 7
    def train_givens(self, batch_index, batch_size):
        '''
        batch_index is a Theano variable.
        '''
        # compute the GPU batch index; these will all be Theano variables
        solver_batches_per_gpu_batch = T.cast(T.int_div(self.num_GPU_store, batch_size), 'int32')
        real_batch_index = T.cast(T.mod(batch_index, solver_batches_per_gpu_batch), 'int32')

        givens = {self.X_batch_var: self.GPU_X_train[real_batch_index * batch_size:(real_batch_index + 1) * batch_size]}
        givens[self.y_batch_var] = self.GPU_y_train[real_batch_index * batch_size:(real_batch_index + 1) * batch_size]
        return givens
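
The returned dictionary is meant for the givens argument of theano.function, so each compiled call slices its minibatch out of data already resident on the GPU instead of transferring it per call. A hypothetical usage sketch; cost and updates are assumed to exist elsewhere in the solver:

index = T.iscalar('index')
train_fn = theano.function(
    [index],
    cost,                                   # assumed training objective
    updates=updates,                        # assumed parameter updates
    givens=self.train_givens(index, 128))   # substitute GPU-resident slices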
Example No. 8
def define_loss_discriminator(net,
                              generated_img,
                              input_to_discriminator,
                              loss_type='log'):
    assert loss_type in ('log', 'sqr')
    # stack generated and real images so the discriminator runs once
    input = T.concatenate([generated_img, input_to_discriminator], axis=0)
    output = lasagne.layers.get_output(net['out'], inputs=input)
    batch_size = T.int_div(input.shape[0], 2)  # symbolic split point
    if loss_type == 'log':
        # first half of the batch is generated, second half is real
        true_loss = -T.log(output[batch_size:] + eps).mean()
        generated_loss = -T.log(1 - output[:batch_size] + eps).mean()
    else:
        # least-squares GAN discriminator loss
        true_loss = (T.sqr(output[batch_size:] - 1)).mean()
        generated_loss = (T.sqr(output[:batch_size])).mean()

    return true_loss + generated_loss
Example No. 9
    def input_row_from_variables(ori_ip, dest_ip, ori_lat, ori_long, dest_lat,
                                 dest_long, ori_type, dest_type, dist,
                                 latency):
        '''Create an input row for the MLP from the inputs'''

        input_row = tensor.zeros([input_size])

        offset = 0

        ips = [ori_ip, dest_ip]
        for ip in ips:
            for _ in range(4):
                input_row = add_one_shot(input_row, offset,
                                         tensor.mod(ip, 256))
                ip = tensor.int_div(ip, 256)
                offset += 256

        for lat_, long_ in [(ori_lat, ori_long), (dest_lat, dest_long)]:
            translated_lat = tensor.iround(
                (coordinate_size - 1) * (lat_ / 180 + 0.5))
            input_row = add_thermo(input_row, offset, translated_lat)
            offset += coordinate_size

            translated_long = tensor.iround(
                (coordinate_size - 1) * (long_ / 360 + 0.5))
            input_row = add_thermo(input_row, offset, translated_long)
            offset += coordinate_size

        for type_ in [ori_type, dest_type]:
            input_row = add_one_shot(input_row, offset, type_ + 1)
            offset += type_size

        translated_dist = tensor.iround(
            (dist_size - 1) * (tensor.minimum(1, dist / max_earth_distance)))
        input_row = add_thermo(input_row, offset, translated_dist)
        offset += dist_size

        translated_dist = tensor.iround(
            (small_dist_size - 1) *
            (tensor.minimum(1, dist / max_earth_distance)))
        input_row = add_thermo(input_row, offset, translated_dist)

        # could be useful if we want to add something
        offset += small_dist_size

        return input_row
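
As a plain-numpy aside, the two encodings built by these helpers differ as follows, assuming add_one_shot writes a one-hot bit and add_thermo a thermometer (unary) prefix, as sketched after Example No. 4:

import numpy as np

def one_hot(size, index):
    row = np.zeros(size, dtype='int64')
    row[index] = 1              # single active bit
    return row

def thermometer(size, value):
    row = np.zeros(size, dtype='int64')
    row[:value + 1] = 1         # unary prefix up to the value
    return row

print(one_hot(8, 3))      # [0 0 0 1 0 0 0 0]
print(thermometer(8, 3))  # [1 1 1 1 0 0 0 0]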
Example No. 10
    def train_givens(self, batch_index, batch_size):
        '''
        batch_index is a Theano variable.
        '''
        # compute the gpu batch index
        # these will all be theano variables
        solver_batches_per_gpu_batch = T.cast(
            T.int_div(self.num_GPU_store, batch_size), 'int32')
        real_batch_index = T.cast(
            T.mod(batch_index, solver_batches_per_gpu_batch), 'int32')

        lo = real_batch_index * batch_size
        hi = (real_batch_index + 1) * batch_size
        givens = {
            self.X_batch_var: self.GPU_X_train[lo:hi],
            self.y_batch_var: self.GPU_y_train[lo:hi],
        }
        return givens
Example No. 11
def input_row_from_variables(ip_, lat_, long_, type_):
    '''Create an input row for the MLP from the inputs'''

    input_row = tensor.zeros([input_size])
    offset = 0

    for _ in range(4):
        input_row = add_one_shot(input_row, offset, tensor.mod(ip_, 256))
        ip_ = tensor.int_div(ip_, 256)
        offset += 256

    translated_lat = tensor.iround((coordinate_size - 1) * (lat_ / 180 + 0.5))
    input_row = add_thermo(input_row, offset, translated_lat)
    offset += coordinate_size

    translated_long = tensor.iround((coordinate_size - 1) * (long_ / 360 + 0.5))
    input_row = add_thermo(input_row, offset, translated_long)
    offset += coordinate_size

    input_row = add_one_shot(input_row, offset, type_ + 1)
    offset += type_size

    return input_row
Example No. 12
def GetJacobi(i, B):
    # recover 2-d coordinates from the flat index: y, x = divmod(i, N)
    y, x = T.int_div(i, N), T.mod(i, N)
    B = T.set_subtensor(B[y, N + x], 1)   # perturb one entry of the right block
    B2 = self(B)                          # evaluate with the perturbed input
    B = T.set_subtensor(B[y, N + x], 0)   # undo the perturbation
    return B2[:N, N:].reshape((N * N,))
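
The int_div/mod pair is the symbolic counterpart of Python's divmod, used here to turn a flat index into 2-d coordinates. A standalone sketch with a concrete N for illustration:

import theano
import theano.tensor as T

N = 4
i = T.lscalar('i')
y, x = T.int_div(i, N), T.mod(i, N)  # symbolic divmod(i, N)
f = theano.function([i], [y, x])
print(f(9))  # -> [array(2), array(1)], since divmod(9, 4) == (2, 1)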