def nn_init(self, layer, initializer_option):
        if initializer_option is None:
            return
        if type(initializer_option) == pyparsing.ParseResults and type(
                initializer_option[0]) == hypergan.parser.Pattern:
            args = [initializer_option[0].layer_name
                    ] + initializer_option[0].args
            options = hc.Config(initializer_option[0].options)
        else:
            args = [initializer_option]
            options = hc.Config({})

        layer_data = layer.weight.data

        if args[0] == "uniform":
            a = float(args[1])
            b = float(args[2])
            nn.init.uniform_(layer_data, a, b)
        elif args[0] == "normal":
            mean = float(args[1])
            std = float(args[2])
            nn.init.normal_(layer_data, mean, std)
        elif args[0] == "constant":
            val = float(args[1])
            nn.init.constant_(layer_data, val)
        elif args[0] == "ones":
            nn.init.ones_(layer_data)
        elif args[0] == "zeros":
            nn.init.zeros_(layer_data)
        elif args[0] == "eye":
            nn.init.eye_(layer_data)
        elif args[0] == "dirac":
            nn.init.dirac_(layer_data)
        elif args[0] == "xavier_uniform":
            gain = nn.init.calculate_gain(options.gain or "relu")
            nn.init.xavier_uniform_(layer_data, gain=gain)
        elif args[0] == "xavier_normal":
            gain = nn.init.calculate_gain(options.gain or "relu")
            nn.init.xavier_normal_(layer_data, gain=gain)
        elif args[0] == "kaiming_uniform":
            a = 0  #TODO wrong
            nn.init.kaiming_uniform_(layer_data,
                                     mode=(options.mode or "fan_in"),
                                     nonlinearity=options.gain or "relu")
        elif args[0] == "kaiming_normal":
            a = 0  #TODO wrong
            nn.init.kaiming_normal_(layer_data,
                                    mode=(options.mode or "fan_in"),
                                    nonlinearity=options.gain or "relu")
        elif args[0] == "orthogonal":
            if "gain" in options:
                gain = nn.init.calculate_gain(options["gain"])
            else:
                gain = 1
            nn.init.orthogonal_(layer_data, gain=gain)
        else:
            print("Warning: No initializer found for " + args[0])
        if "gain" in options:
            layer_data.mul_(nn.init.calculate_gain(options["gain"]))
        return NoOp()
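For reference, each branch above maps directly onto a torch.nn.init call. A minimal standalone sketch of two of them in plain PyTorch, outside the parser machinery (the Linear layer is only an illustration):

    import torch.nn as nn

    layer = nn.Linear(16, 32)
    # args[0] == "orthogonal" with no options: gain defaults to 1
    nn.init.orthogonal_(layer.weight.data, gain=1)
    # args[0] == "xavier_uniform" with options.gain == "relu"
    nn.init.xavier_uniform_(layer.weight.data, gain=nn.init.calculate_gain("relu"))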
Example #2
 def __init__(self, gan, config, name=None, input=None, reuse=None, x=None, g=None, features=[], skip_connections=[]):
     self.layers = []
     self.skip_connections = skip_connections
     self.layer_options = {}
     self.layer_ops = {
         "relational": self.layer_relational,
         "minibatch": self.layer_minibatch,
         "phase_shift": self.layer_phase_shift,
         "conv": self.layer_conv,
         "zeros": self.layer_zeros,
         "zeros_like": self.layer_zeros_like,
         "control": self.layer_controls,
         "linear": self.layer_linear,
         "identity": self.layer_identity,
         "double_resolution": self.layer_double_resolution,
         "attention": self.layer_attention,
         "adaptive_instance_norm": self.layer_adaptive_instance_norm,
         "subpixel": self.layer_subpixel,
         "pixel_norm": self.layer_pixel_norm,
         "gram_matrix": self.layer_gram_matrix,
         "unpool": self.layer_unpool,
         "crop": self.layer_crop,
         "resize_images": self.layer_resize_images,
         "concat_noise": self.layer_noise,
         "concat": self.layer_concat,
         "variational_noise": self.layer_variational_noise,
         "variational": self.layer_variational,
         "noise": self.layer_noise,
         "pad": self.layer_pad,
         "fractional_avg_pool": self.layer_fractional_avg_pool,
         "two_sample_stack": self.layer_two_sample_stack,
         "bicubic_conv": self.layer_bicubic_conv,
         "conv_double": self.layer_conv_double,
         "conv_reshape": self.layer_conv_reshape,
         "reshape": self.layer_reshape,
         "conv_dts": self.layer_conv_dts,
         "deconv": self.layer_deconv,
         "resize_conv": self.layer_resize_conv,
         "squash": self.layer_squash,
         "add": self.layer_add,
         "avg_pool": self.layer_avg_pool,
         "reference": self.layer_reference,
         "image_statistics": self.layer_image_statistics,
         "combine_features": self.layer_combine_features,
         "resnet": self.layer_resnet,
         "layer_filter": self.layer_filter,
         "turing_test": self.layer_turing_test,
         "activation": self.layer_activation,
         "knowledge_base": self.layer_knowledge_base,
         "const": self.layer_const,
         "progressive_replace": self.layer_progressive_replace
         }
     self.features = features
     self.controls = {}
     self.named_layers = {}
     if not hasattr(gan, "named_layers"):
         gan.named_layers = {}
     self.subnets = hc.Config(hc.Config(config).subnets or {})
Example #3
    def layer_resnet(self, net, args, options):
        options = hc.Config(options)
        config = self.config
        ops = self.ops
        depth = int(args[0])
        activation_s = options.activation or self.ops.config_option("activation")
        activation = self.ops.lookup(activation_s)
        stride = options.stride or 1
        stride = int(stride)
        shortcut = net
        initializer = None # default to global

        if self.ops.config_option("avg_pool"):
            net = ops.conv2d(net, 3, 3, 1, 1, depth, initializer=initializer)
            if stride != 1:
                ksize = [1,stride,stride,1]
                net = tf.nn.avg_pool(net, ksize=ksize, strides=ksize, padding='SAME')
        else:
            net = ops.conv2d(net, 3, 3, stride, stride, depth, initializer=initializer)
        net = activation(net)
        net = ops.conv2d(net, 3, 3, 1, 1, depth, initializer=initializer)
        if ops.shape(net)[-1] != ops.shape(shortcut)[-1] or stride != 1:
            if self.ops.config_option("avg_pool"):
                shortcut = ops.conv2d(shortcut, 3, 3, 1, 1, depth, initializer=initializer)
                if stride != 1:
                    ksize = [1,stride,stride,1]
                    shortcut = tf.nn.avg_pool(shortcut, ksize=ksize, strides=ksize, padding='SAME')
            else:
                shortcut = ops.conv2d(shortcut, 3, 3, stride, stride, depth, initializer=initializer)
        net = shortcut + net
        net = activation(net)

        return net
Example #4
    def __init__(self, component, args, options):
        super(ResizableStack, self).__init__(component, args, options)
        self.size = LayerShape(component.gan.channels(), component.gan.height(), component.gan.width())
        self.max_channels = options.max_channels or 256
        self.segment_channels = options.segment_channels or 5
        self.style = options.style or "w"

        layers = []

        sizes = self.sizes(component.current_size.height, component.current_size.width, component.gan.height(), component.gan.width(), self.segment_channels * 2 * component.gan.channels())
        print("SIZES", sizes)
        for i, size in enumerate(sizes[1:]):
            c = min(size.channels, self.max_channels)
            upsample = hg.layers.Upsample(component, [], hc.Config({"w": size.width, "h": size.height}))
            component.current_size = upsample.output_size() #TODO abstract input_size
            if options.normalize != False:
                _, add = component.parse_layer("add self (ez_norm initializer=(xavier_normal) style=" + self.style + ")")
            _, conv = component.parse_layer("conv2d " + str(size.channels) + " padding=0 initializer=(xavier_normal)")

            if options.normalize == False:
                layers += [upsample, conv]
            else:
                layers += [upsample, add, conv]
            if i < len(sizes) - 2:
                layers += [nn.ReLU()]

        layers += [hg.layers.SegmentSoftmax(component, [component.gan.channels()], {})]
        self.layers = nn.ModuleList(layers)
Example #5
 def test_run(self):
     with self.test_session():
         gan = mock_gan()
         args = hc.Config({"size": "1"})
         cli = hg.CLI(gan, args)
         cli.run()
         self.assertEqual(cli.gan, gan)
Example #6
    def layer_reference(self, net, args, options):
        options = hc.Config(options)

        obj = self
        if "src" in options:
            obj = getattr(self.gan, options.src)
        return obj.layer(options.name)
Example #7
  def __init__(self, gan=None, config=None, trainer=None, name="ProgressCompressTrainHook"):
    super().__init__(config=config, gan=gan, trainer=trainer, name=name)

    self.x = tf.Variable(tf.zeros_like(gan.inputs.x))
    self.g = tf.Variable(tf.zeros_like(gan.generator.sample))

    stacked = tf.concat([self.gan.inputs.x, self.gan.generator.sample], axis=0)
    self.assign_x = tf.assign(self.x, gan.inputs.x)
    self.assign_g = tf.assign(self.g, gan.generator.sample)
    self.re_init_d = [d.initializer for d in gan.discriminator.variables()]
    gan.hack = self.g

    self.assign_knowledge_base = []

    bs = gan.batch_size()
    real = gan.discriminator.named_layers['knowledge_base_target']  # tf.reshape(gan.loss.sample[:2], [2,-1])
    _inputs = hc.Config({'x':real})
    inner_gan = KBGAN(config=self.config.knowledge_base, inputs=_inputs, x=real, latent=stacked)
    self.kb_loss = inner_gan.loss
    self.kb = inner_gan.generator
    self.trainer = inner_gan.trainer
    variables = inner_gan.variables()
    #variables += self.kb.variables()

    for c in gan.components:
        if hasattr(c, 'knowledge_base'):
            for name, net in c.knowledge_base:
                assign = self.kb.named_layers[name]
                if self.ops.shape(assign)[0] > self.ops.shape(net)[0]:
                    assign = tf.slice(assign, [0 for i in self.ops.shape(net)], [self.ops.shape(net)[0]] + self.ops.shape(assign)[1:])
                self.assign_knowledge_base.append(tf.assign(net, assign))

    self.gan.add_metric('d_kb', self.kb_loss.sample[0])
    self.gan.add_metric('g_kb', self.kb_loss.sample[1])
Example #8
    def __init__(self, config={}, device="/gpu:0"):
        config = hc.Config(config)
        dtype = config.dtype or "float32"
        initializer = config.initializer or 'orthogonal'
        orthogonal_gain = config.orthogonal_gain or 1.0
        random_stddev = config.random_stddev or 0.02

        self.dtype = self.parse_dtype(dtype)
        self.scope_count = 0
        self.description = ''
        self.weights = []
        self.biases = []
        self.device = config.device or device  # fall back to the constructor argument
        self.initialized = False
        self._reuse = False
        self.reuse_scope_count = 0
        self.reuse_context = 0
        self.config = config
        if initializer == 'orthogonal':
            self.initializer = self.orthogonal_initializer(orthogonal_gain)
        elif initializer == 'he_normal':
            self.initializer = self.he_normal_initializer()
        elif initializer == 'xavier':
            self.initializer = self.xavier_initializer()
        else:
            self.initializer = self.random_initializer(random_stddev)
Example #9
    def layer_linear(self, net, args, options):
        options = hc.Config(options)
        shape = [int(x) for x in str(args[0]).split("*")]
        bias = True
        if options.bias == False:
            bias = False
        output_size = 1
        for dim in shape:
            output_size *= dim
        layers = []
        if len(self.current_size.dims) != 1:
            layers += [nn.Flatten()]

        layers += [
            nn.Linear(options.input_size or self.current_size.size(),
                      output_size,
                      bias=bias)
        ]
        self.nn_init(layers[-1], options.initializer)
        self.current_size = LayerShape(*list(reversed(shape)))
        if len(shape) != 1:
            layers.append(Reshape(*self.current_size.dims))

        if self.is_latent:
            self._latent_parameters += [layers[0].weight]
            self.is_latent = False

        return nn.Sequential(*layers)
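As a worked example of the "*"-shape argument handling above (pure arithmetic, nothing assumed beyond the snippet itself): an argument of "64*8" produces a 512-unit Linear followed by a Reshape to the reversed dims.

    args = ["64*8"]
    shape = [int(x) for x in str(args[0]).split("*")]  # [64, 8]
    output_size = 1
    for dim in shape:
        output_size *= dim                             # 64 * 8 = 512
    # -> nn.Linear(in_features, 512), then Reshape(8, 64) since the shape is reversed
    assert (shape, output_size) == ([64, 8], 512)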
Example #10
    def layer_conv3d(self, net, args, options):
        if len(args) > 0:
            channels = args[0]
        else:
            channels = self.current_size.channels
        options = hc.Config(options)
        stride = options.stride or 1
        fltr = options.filter or 3
        dilation = 1

        padding = options.padding or 1  #self.get_same_padding(self.current_width, self.current_width, stride, dilation)
        if options.padding0:
            padding = [options.padding0, padding, padding]
        if options.stride0:
            stride = [options.stride0, stride, stride]
        else:
            stride = [stride, stride, stride]

        layers = [
            nn.Conv3d(options.input_channels or self.current_size.channels,
                      channels,
                      fltr,
                      stride,
                      padding=padding)
        ]
        self.nn_init(layers[-1], options.initializer)  # was `layer`, which is undefined here
        # NOTE: `frames` was undefined in the original (marked TODO); assuming the
        # frame (depth) dimension is tracked as the second entry of current_size.dims
        frames = self.current_size.dims[1] // stride[0]
        self.current_size = LayerShape(
            channels, frames, self.current_size.height // stride[1],
            self.current_size.width // stride[2]
        )  # TODO: should use conv output arithmetic as in conv2d, not plain division
        return nn.Sequential(*layers)
Example #11
    def layer_conv1d(self, net, args, options):
        if len(args) > 0:
            channels = args[0]
        else:
            channels = self.current_size.channels
        print("Options:", options)
        options = hc.Config(options)
        stride = options.stride or 1
        fltr = options.filter or 3
        dilation = 1

        padding = 1
        if options.padding is not None:
            padding = options.padding

        layers = [
            nn.Conv1d(options.input_channels or self.current_size.channels,
                      channels,
                      fltr,
                      stride,
                      padding=padding)
        ]
        self.nn_init(layers[-1], options.initializer)
        # only the first output dim is meaningful for 1-D convolution
        h, _ = self.conv_output_shape(
            (self.current_size.height, self.current_size.height),
            fltr, stride, padding, dilation)
        self.current_size = LayerShape(channels, h)
        return nn.Sequential(*layers)
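conv_output_shape itself is not shown in these examples, but layer_conv1d above and layer_conv2d below both rely on it. Presumably it implements the standard convolution size arithmetic from the torch.nn.Conv2d documentation; a self-contained sketch under that assumption:

    import math

    def conv_output_shape(hw, kernel_size, stride, padding, dilation):
        # floor((n + 2*padding - dilation*(kernel_size - 1) - 1) / stride + 1) per dim
        out = [math.floor((n + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1)
               for n in hw]
        return out[0], out[1]

    # a 3x3 kernel with stride 1, padding 1, dilation 1 preserves spatial size:
    assert conv_output_shape((64, 64), 3, 1, 1, 1) == (64, 64)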
Example #12
    def layer_conv2d(self, net, args, options):
        if len(args) > 0:
            channels = args[0]
        else:
            channels = self.current_size.channels
        options = hc.Config(options)
        stride = 1
        if options.stride is not None:
            stride = options.stride
        filter = 3
        if options.filter is not None:
            filter = options.filter
        padding = 1
        if options.padding is not None:
            padding = options.padding

        dilation = 1

        layer = nn.Conv2d(options.input_channels or self.current_size.channels,
                          channels,
                          filter,
                          stride,
                          padding=(padding, padding))
        self.nn_init(layer, options.initializer)
        h, w = self.conv_output_shape(
            (self.current_size.height, self.current_size.width), filter,
            stride, padding, dilation)
        self.current_size = LayerShape(channels, h, w)
        return layer
Example #13
    def layer_two_sample_stack(self, net, args, options):
        options = hc.Config(options)

        def _slice(_net):
            s = self.ops.shape(_net)
            s[0] = s[0] // 2
            _net1 = tf.slice(_net, [0, 0, 0, 0], s)
            _net2 = tf.slice(_net, [s[0], 0, 0, 0], s)
            return _net1, _net2

        net1, net2 = _slice(net)
        net1a, net1b = _slice(net1)
        net2a, net2b = _slice(net2)
        if options.mixup:
            alpha = tf.random_uniform([1], 0, 1)
            t1 = alpha * net1a + (1 - alpha) * net1b
            t2 = alpha * net2a + (1 - alpha) * net2b
            t1 = tf.reshape(t1, self.ops.shape(net1b))
            t2 = tf.reshape(t2, self.ops.shape(net2b))
        else:
            t1 = tf.concat([net1a, net1b], axis=3)
            t2 = tf.concat([net2a, net2b], axis=3)
        # hack fixes shape expectations
        #t1 = tf.concat([t1,t1], axis=0)
        #t2 = tf.concat([t2,t2], axis=0)
        target = tf.concat([t1, t2], axis=0)

        return target
Example #14
    def layer_subpixel(self, net, args, options):
        options = hc.Config(options)
        depth = int(args[0])
        config = self.config
        activation = options.activation or config.defaults.activation
        r = options.r or 2
        r = int(r)

        def _PS(X, r, n_out_channel):
            if n_out_channel >= 1:
                bsize, a, b, c = X.get_shape().as_list()
                bsize = tf.shape(X)[
                    0]  # Handling Dimension(None) type for undefined batch dim
                Xs = tf.split(X, r, 3)  #b*h*w*r*r
                Xr = tf.concat(Xs, 2)  #b*h*(r*w)*r
                X = tf.reshape(
                    Xr,
                    (bsize, r * a, r * b, n_out_channel))  # b*(r*h)*(r*w)*c
            return X

        args[0] = depth * (r**2)
        if (activation == 'crelu' or activation == 'double_sided'):
            args[0] //= 2
        y1 = self.layer_conv(net, args, options)
        ps = _PS(y1, r, depth)
        return ps
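_PS is the TensorFlow analogue of pixel shuffle: the conv first multiplies channels by r**2, then the reshape trades those channels for an r-times larger spatial grid. The same shape bookkeeping in plain PyTorch (note NCHW here versus NHWC above):

    import torch

    r, depth = 2, 16
    x = torch.randn(1, depth * r * r, 8, 8)  # conv output: r^2 * depth channels
    y = torch.nn.PixelShuffle(r)(x)
    assert y.shape == (1, depth, 16, 16)     # channels traded for 2x resolution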
Example #15
File: cli.py Project: halflife2/HyperGAN
    def __init__(self, gan, args={}):
        self.samples = 0
        self.steps = 0
        self.gan = gan

        args = hc.Config(args)
        self.args = args

        crop = self.args.crop

        self.config_name = self.args.config or 'default'
        self.method = args.method or 'test'
        self.total_steps = args.steps or -1
        self.sample_every = self.args.sample_every or 100

        self.sampler = CLI.sampler_for(args.sampler)(self.gan)

        self.validate()
        if self.args.save_file:
            self.save_file = self.args.save_file
        else:
            default_save_path = os.path.abspath("saves/"+self.config_name)
            self.save_file = default_save_path + "/model.ckpt"
            self.create_path(self.save_file)

        title = "[hypergan] " + self.config_name
        GlobalViewer.title = title
        GlobalViewer.enabled = self.args.viewer
Example #16
 def layer_layer(self, net, args, options):
     options = hc.Config(options)
     if "src" in options:
         obj = getattr(self.gan, options.src)
     else:
         obj = self
     return obj.layer(args[0])
Example #17
    def layer_conv_reshape(self, net, args, options):
        options = hc.Config(options)
        config = self.config
        ops = self.ops

        activation_s = options.activation or config.defaults.activation
        activation = self.ops.lookup(activation_s)

        stride = options.stride or config.defaults.stride or [1, 1]
        fltr = options.filter or config.defaults.filter or [3, 3]
        if type(fltr) == type(""):
            fltr = [int(fltr), int(fltr)]
        if type(stride) == type(""):
            stride = [int(stride), int(stride)]
        depth = int(args[0])

        initializer = None  # default to global

        trainable = True
        if options.trainable == 'false':
            trainable = False
        net = ops.conv2d(net,
                         fltr[0],
                         fltr[1],
                         stride[0],
                         stride[1],
                         depth * 4,
                         initializer=initializer,
                         trainable=trainable)
        s = ops.shape(net)
        net = tf.reshape(net, [s[0], s[1] * 2, s[2] * 2, depth])
        if activation:
            #net = self.layer_regularizer(net)
            net = activation(net)
        return net
Example #18
    def create_trainer(self, cycloss, z_cycloss, encoder, generator,
                       encoder_loss, standard_loss, standard_discriminator,
                       encoder_discriminator):

        metrics = []
        metrics.append(standard_loss.metrics)

        d_vars = standard_discriminator.variables()
        g_vars = generator.variables() + encoder.variables()
        print("D_VARS", d_vars)
        print("G_VARS", g_vars)
        #d_loss = standard_loss.d_loss
        #g_loss = standard_loss.g_loss + cycloss
        loss1 = ("g_loss", standard_loss.g_loss)
        loss2 = ("d_loss", standard_loss.d_loss)
        loss = hc.Config({
            'sample': [standard_loss.d_loss, standard_loss.g_loss],
            'metrics': {
                'g_loss': loss1[1],
                'd_loss': loss2[1]
            }
        })
        trainer = ConsensusTrainer(self,
                                   self.config.trainer,
                                   loss=loss,
                                   g_vars=g_vars,
                                   d_vars=d_vars)
        return trainer
Example #19
    def __init__(self, gan, args={}):
        self.samples = 0
        self.steps = 0
        self.gan = gan
        if gan is not None:
            self.gan.cli = self

        args = hc.Config(args)
        self.args = args

        crop = self.args.crop

        self.config_name = self.args.config or 'default'
        self.method = args.method or 'test'
        self.total_steps = args.steps or -1
        self.sample_every = self.args.sample_every or 100

        self.sampler_name = args.sampler
        self.sampler = None
        self.validate()
        if self.args.save_file:
            self.save_file = self.args.save_file
        else:
            default_save_path = os.path.abspath("saves/" + self.config_name)
            self.save_file = default_save_path + "/model.ckpt"
            self.create_path(self.save_file)
        if self.gan is not None:
            self.gan.save_file = self.save_file

        title = "[hypergan] " + self.config_name
        GlobalViewer.enable_menu = self.args.menu
        GlobalViewer.title = title
        GlobalViewer.viewer_size = self.args.viewer_size
        GlobalViewer.enabled = self.args.viewer
        GlobalViewer.zoom = self.args.zoom
Example #20
    def layer_pad(self, net, args, options):
        options = hc.Config(options)
        s = self.ops.shape(net)
        sizeh = s[1] // 2  # NHWC: s[1] is height, s[2] is width (names were swapped)
        sizew = s[2] // 2
        # tf.pad returns a single tensor; the original unpacked it into three values
        net = tf.pad(net, [[0, 0], [sizeh, sizeh], [sizew, sizew], [0, 0]])

        return net
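tf.pad's second argument lists [before, after] padding per axis, so with NHWC input the call above adds half the spatial size on each side, roughly doubling height and width. A quick shape check in plain TensorFlow:

    import tensorflow as tf

    x = tf.zeros([1, 4, 6, 3])                            # NHWC
    padded = tf.pad(x, [[0, 0], [2, 2], [3, 3], [0, 0]])  # h//2 and w//2 per side
    assert padded.shape.as_list() == [1, 8, 12, 3]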
Example #21
    def lookup(self, symbol):
        if symbol is None:
            return None

        if type(symbol) == type([]):
            return [self.lookup(k) for k in symbol]

        if type(symbol) == type({}) or type(symbol) == hc.Config:
            return hc.Config(
                {k: self.lookup(symbol[k])
                 for k in symbol.keys()})

        if type(symbol) != type(""):
            return symbol

        if symbol.startswith('function:'):
            return self.lookup_function(symbol)

        if symbol.startswith('class:'):
            return self.lookup_class(symbol)

        if symbol == 'tanh':
            return tf.nn.tanh
        if symbol == 'sigmoid':
            return tf.nn.sigmoid
        if symbol == 'batch_norm':
            return layer_regularizers.batch_norm_1
        if symbol == 'layer_norm':
            return layer_regularizers.layer_norm_1
        if symbol == "crelu":
            return tf.nn.crelu
        if symbol == "prelu":
            return self.prelu()
        if symbol == "selu":
            return selu
        if symbol == "lrelu":
            return lrelu
        if symbol == "relu":
            return tf.nn.relu
        if symbol == 'square':
            return tf.square
        if symbol == 'reduce_mean':
            return tf.reduce_mean
        if symbol == 'reduce_min':
            return tf.reduce_min
        if symbol == 'reduce_sum':
            return tf.reduce_sum
        if symbol == 'reduce_logsumexp':
            return tf.reduce_logsumexp
        if symbol == 'reduce_linear':
            return self.reduce_linear()

        if symbol == 'l1_distance':
            return l1_distance
        if symbol == 'l2_distance':
            return l2_distance

        return symbol
Example #22
 def parse_layer(self, layer_defn):
     print("Parsing layer:", layer_defn)
     parsed = self.parser.parse_string(layer_defn)
     parsed.parsed_options = hc.Config(parsed.options)
     parsed.layer_defn = layer_defn
     print("Parsed layer:", parsed.to_list())
     layer = self.build_layer(parsed.layer_name, parsed.args,
                              parsed.parsed_options)
     return parsed, layer
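parse_layer consumes hypergan's layer-definition DSL; the strings assembled in Example #4 are representative of its input (listed here for reference only, since the parser object lives on the component):

    layer_defns = [
        "conv2d 64 padding=0 initializer=(xavier_normal)",
        "add self (ez_norm initializer=(xavier_normal) style=w)",
    ]
    # each string becomes (parsed, layer) via component.parse_layer(defn)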
Example #23
 def get_array(self, name, shape=None):
     if shape:
         shape = [int(x) for x in shape]
     connections = hc.Config(self.connections)
     if name in connections:
         conns = connections[name]
     else:
         conns = []
     return [con[1] for con in conns if shape is None or con[0] == shape]
Example #24
 def parse_opts(self, opts):
     options = {}
     for opt in opts.split(","):
         if opt == "":
             continue
         name, val = opt.split("=")
         value = self.configurable_param(val)
         options[name] = value
     return hc.Config(options)
Example #25
 def layer_resize_images(self, net, args, options):
     options = hc.Config(options)
     if len(args) == 0:
         w = self.gan.width()
         h = self.gan.height()
     else:
         w = int(args[0])
         h = int(args[1])
     method = options.method or 1
     return tf.image.resize_images(net, [w, h], method=method)
Example #26
    def layer_reference(self, net, args, options):
        options = hc.Config(options)

        obj = self
        if "src" in options:
            obj = getattr(self.gan, options.src)
        if "resize_images" in options:
            return self.layer_resize_images(getattr(obj, options.name), options["resize_images"].split("*"), options)
        else:
            return obj.layer(options.name)
Example #27
 def parse_args(self, strs):
     options = hc.Config({})
     args = []
     for x in strs:
         if '=' in x:
             lhs, rhs = x.split('=')
             options[lhs] = rhs
         else:
             args.append(x)
     return args, options
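parse_args splits positional tokens from key=value options. A standalone check of that behavior (the function body is copied out of the class so it runs on its own; hc.Config is the only dependency):

    import hyperchamber as hc

    def parse_args(strs):
        options = hc.Config({})
        args = []
        for x in strs:
            if '=' in x:
                lhs, rhs = x.split('=')
                options[lhs] = rhs
            else:
                args.append(x)
        return args, options

    args, options = parse_args(["256", "stride=2", "bias=false"])
    assert args == ["256"] and options["stride"] == "2"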
Example #28
    def forward_loss(self):
        losses = []
        # the original referenced undefined names (d_reals, d_fakes, config, d,
        # d_terms, i); assume they live on self and enumerate to index term_gammas
        for i, (d_real, d_fake) in enumerate(zip(self.d_reals, self.d_fakes)):
            loss = self.create_component(self.config.loss)
            d_loss, g_loss = loss.forward(d_real, d_fake)
            gamma = self.configurable_param(self.config.term_gammas[i])
            losses += [[gamma * d_loss, gamma * g_loss]]

        self.loss = hc.Config({
            'sample': [sum([l[0] for l in losses]), sum([l[1] for l in losses])]
            })
Example #29
    def layer_zeros(self, net, args, options):
        options = hc.Config(options)
        config = self.config
        ops = self.ops

        self.ops.activation_name = options.activation_name
        reshape = [ops.shape(net)[0]] + [int(x) for x in args[0].split("*")]
        size = reduce(operator.mul, reshape)
        net = tf.zeros(reshape)

        return net
Example #30
 def layer_const(self, net, args, options):
     options = hc.Config(options)
     s = [1] + [int(x) for x in args[0].split("*")]
     trainable = True
     if "trainable" in options and options["trainable"] == "false":
         trainable = False
     initializer = None
     if "initializer" in options and options["initializer"] is not None:
         initializer = self.ops.lookup_initializer(options["initializer"], options)
     with tf.variable_scope(self.ops.generate_name(), reuse=self.ops._reuse):
         weight = self.ops.get_weight(s, name='const', trainable=trainable, initializer=initializer)
         return tf.tile(weight, [self.gan.batch_size(), 1, 1, 1])