def _generate_forward_declarations_for_binding_traits(self):
        # A list of (builder_type, needs_runtime_cast)
        type_arguments = []

        for domain in self.domains_to_generate():
            declarations_to_generate = filter(lambda decl: self.type_needs_shape_assertions(decl.type), domain.type_declarations)

            for type_declaration in declarations_to_generate:
                for type_member in type_declaration.type_members:
                    if isinstance(type_member.type, EnumType):
                        type_arguments.append((Generator.type_builder_string_for_type_member(type_member, type_declaration), False))

                if isinstance(type_declaration.type, ObjectType):
                    type_arguments.append((Generator.type_builder_string_for_type(type_declaration.type), Generator.type_needs_runtime_casts(type_declaration.type)))

        struct_keywords = ['struct']
        function_keywords = ['static void']
        export_macro = self.model().framework.setting('export_macro', None)
        if export_macro is not None:
            struct_keywords.append(export_macro)
            # function_keywords[1:1] = [export_macro]

        lines = []
        for argument in type_arguments:
            lines.append('template<> %s BindingTraits<%s> {' % (' '.join(struct_keywords), argument[0]))
            if argument[1]:
                lines.append('static PassRefPtr<%s> runtimeCast(PassRefPtr<Inspector::InspectorValue> value);' % argument[0])
            lines.append('#if !ASSERT_DISABLED')
            lines.append('%s assertValueHasExpectedType(Inspector::InspectorValue*);' % ' '.join(function_keywords))
            lines.append('#endif // !ASSERT_DISABLED')
            lines.append('};')
        return '\n'.join(lines)
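
A sketch of the C++ forward declarations the lines above assemble, for a hypothetical protocol type with runtime casts and an export macro configured (the type name and JS_EXPORT_PRIVATE are illustrative only):

#   template<> struct JS_EXPORT_PRIVATE BindingTraits<Inspector::Protocol::Domain::SomeType> {
#   static PassRefPtr<Inspector::Protocol::Domain::SomeType> runtimeCast(PassRefPtr<Inspector::InspectorValue> value);
#   #if !ASSERT_DISABLED
#   static void assertValueHasExpectedType(Inspector::InspectorValue*);
#   #endif // !ASSERT_DISABLED
#   };
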
    def _generate_async_handler_declaration_for_command(self, command):
        callbackName = "%sCallback" % ucfirst(command.command_name)

        in_parameters = ['ErrorString&']
        for _parameter in command.call_parameters:
            in_parameters.append("%s in_%s" % (Generator.type_string_for_unchecked_formal_in_parameter(_parameter), _parameter.parameter_name))
        in_parameters.append("PassRefPtr<%s> callback" % callbackName)

        out_parameters = []
        for _parameter in command.return_parameters:
            out_parameters.append("%s %s" % (Generator.type_string_for_formal_async_parameter(_parameter), _parameter.parameter_name))

        class_components = ['class']
        export_macro = self.model().framework.setting('export_macro', None)
        if export_macro:
            class_components.append(export_macro)

        command_args = {
            'classAndExportMacro': ' '.join(class_components),
            'callbackName': callbackName,
            'commandName': command.command_name,
            'inParameters': ", ".join(in_parameters),
            'outParameters': ", ".join(out_parameters),
        }

        return Template(Templates.BackendDispatcherHeaderAsyncCommandDeclaration).substitute(None, **command_args)
 def __init__(self, generators=None, container=None, database=None):
     Generator.__init__(self)
     # Create defaults per call: mutable or stateful default arguments
     # (a shared list, a live CassandraConnection) are evaluated only once
     # at function definition time and would be shared across instances.
     if container is None:
         container = SimpleContainer()
     if database is None:
         database = CassandraConnection()
     if isinstance(container, OrderContainer):
         self.container = container
     if isinstance(database, DatabaseConnection):
         self.database = database
         self.database.connect()
     if generators:
         self.generators = generators
     else:
         self.generators = [
             CommentLenGenerator(),
             CommentGenerator(),
             CurrencyPairGenerator(),
             DateTimeGenerator(),
             DescriptionGenerator(),
             DirectionGenerator(),
             DurationGenerator(),
             IdGenerator(),
             MagicalNumberGenerator(),
             PriceGenerator(),
             StatusGenerator(),
             TagLenGenerator(),
             TagGenerator(),
             TypeGenerator(),
         ]
     self.data = {}
    def _generate_handler_declaration_for_command(self, command, used_enum_names):
        if command.is_async:
            return self._generate_async_handler_declaration_for_command(command)

        lines = []
        parameters = ['ErrorString&']
        for _parameter in command.call_parameters:
            parameters.append("%s in_%s" % (Generator.type_string_for_unchecked_formal_in_parameter(_parameter), _parameter.parameter_name))

            if isinstance(_parameter.type, EnumType) and _parameter.parameter_name not in used_enum_names:
                lines.append(self._generate_anonymous_enum_for_parameter(_parameter, command))
                used_enum_names.add(_parameter.parameter_name)

        for _parameter in command.return_parameters:
            parameter_name = 'out_' + _parameter.parameter_name
            if _parameter.is_optional:
                parameter_name = 'opt_' + parameter_name
            parameters.append("%s %s" % (Generator.type_string_for_formal_out_parameter(_parameter), parameter_name))

            if isinstance(_parameter.type, EnumType) and _parameter.parameter_name not in used_enum_names:
                lines.append(self._generate_anonymous_enum_for_parameter(_parameter, command))
                used_enum_names.add(_parameter.parameter_name)

        command_args = {
            'commandName': command.command_name,
            'parameters': ", ".join(parameters)
        }
        lines.append('    virtual void %(commandName)s(%(parameters)s) = 0;' % command_args)
        return '\n'.join(lines)
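
For reference, a sketch of the pure-virtual declaration this emits; the command name and parameter type strings are hypothetical (the real ones come from the type_string_for_* helpers):

#     virtual void setBreakpoint(ErrorString&, int in_lineNumber, String* out_breakpointId) = 0;
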
    def _generate_async_dispatcher_class_for_domain(self, command, domain):
        out_parameter_assignments = []
        formal_parameters = []

        for parameter in command.return_parameters:
            param_args = {
                'frameworkPrefix': self.model().framework.setting('prefix'),
                'keyedSetMethod': Generator.keyed_set_method_for_type(parameter.type),
                'parameterName': parameter.parameter_name,
                'parameterType': Generator.type_string_for_stack_in_parameter(parameter),
            }

            formal_parameters.append('%s %s' % (Generator.type_string_for_formal_async_parameter(parameter), parameter.parameter_name))

            if parameter.is_optional:
                if Generator.should_use_wrapper_for_return_type(parameter.type):
                    out_parameter_assignments.append('    if (%(parameterName)s.isAssigned())' % param_args)
                    out_parameter_assignments.append('        jsonMessage->%(keyedSetMethod)s(ASCIILiteral("%(parameterName)s"), %(parameterName)s.getValue());' % param_args)
                else:
                    out_parameter_assignments.append('    if (%(parameterName)s)' % param_args)
                    out_parameter_assignments.append('        jsonMessage->%(keyedSetMethod)s(ASCIILiteral("%(parameterName)s"), %(parameterName)s);' % param_args)
            elif parameter.type.is_enum():
                out_parameter_assignments.append('    jsonMessage->%(keyedSetMethod)s(ASCIILiteral("%(parameterName)s"), Inspector::TypeBuilder::get%(frameworkPrefix)sEnumConstantValue(%(parameterName)s));' % param_args)
            else:
                out_parameter_assignments.append('    jsonMessage->%(keyedSetMethod)s(ASCIILiteral("%(parameterName)s"), %(parameterName)s);' % param_args)

        async_args = {
            'domainName': domain.domain_name,
            'callbackName': ucfirst(command.command_name) + 'Callback',
            'formalParameters': ", ".join(formal_parameters),
            'outParameterAssignments': "\n".join(out_parameter_assignments)
        }
        return Template(Templates.BackendDispatcherImplementationAsyncCommand).substitute(None, **async_args)
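
A sketch of one out-parameter assignment built above, for a required, non-enum return parameter named "result" whose keyed setter is setString (both hypothetical):

#       jsonMessage->setString(ASCIILiteral("result"), result);
#
# Optional parameters additionally get the isAssigned()/truthiness guard
# emitted by the is_optional branch above.
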
    def __init__(self, redis, features=None):
        Generator.__init__(self, redis, features or {})
        self.logger = logging.getLogger(__name__)

        for field in ['contact', 'npc']:
            if not hasattr(self, field):
                setattr(self, field, NPC(self.redis))
        if not hasattr(self, 'business'):
            setattr(self, 'business', Business(self.redis))

        if not hasattr(self, 'text'):

            for field in ['hook', 'request']:
                if hasattr(self, field):
                    self.template = getattr(self, field) + ' ' + self.template

            for field in ['requirement', 'disclaimer', 'payment']:
                if hasattr(self, field):
                    self.template = self.template + ' ' + getattr(self, field)

            self.template = self.template + ' Contact ' + self.contact.name.fullname
            self.template = self.template + ' at the ' + self.business.name.fullname
            if hasattr(self, 'detail'):
                self.template = self.template + ' ' + self.detail

            self.template += '.'

            self.text = self.render_template(self.template)
            self.text = self.render_template(self.text)
        self.text = self.text[0].capitalize() + self.text[1:]
    def generate_domain(self, domain):
        lines = []
        args = {
            'domain': domain.domain_name
        }

        lines.append('// %(domain)s.' % args)

        has_async_commands = any(map(lambda command: command.is_async, domain.commands))
        if len(domain.events) > 0 or has_async_commands:
            lines.append('InspectorBackend.register%(domain)sDispatcher = InspectorBackend.registerDomainDispatcher.bind(InspectorBackend, "%(domain)s");' % args)

        for declaration in domain.type_declarations:
            if declaration.type.is_enum():
                enum_args = {
                    'domain': domain.domain_name,
                    'enumName': declaration.type_name,
                    'enumMap': ", ".join(['%s: "%s"' % (Generator.stylized_name_for_enum_value(enum_value), enum_value) for enum_value in declaration.type.enum_values()])
                }
                lines.append('InspectorBackend.registerEnum("%(domain)s.%(enumName)s", {%(enumMap)s});' % enum_args)

            def is_anonymous_enum_member(type_member):
                return isinstance(type_member.type, EnumType) and type_member.type.is_anonymous

            for _member in filter(is_anonymous_enum_member, declaration.type_members):
                enum_args = {
                    'domain': domain.domain_name,
                    'enumName': '%s%s' % (declaration.type_name, ucfirst(_member.member_name)),
                    'enumMap': ", ".join(['%s: "%s"' % (Generator.stylized_name_for_enum_value(enum_value), enum_value) for enum_value in _member.type.enum_values()])
                }
                lines.append('InspectorBackend.registerEnum("%(domain)s.%(enumName)s", {%(enumMap)s});' % enum_args)

        for event in domain.events:
            event_args = {
                'domain': domain.domain_name,
                'eventName': event.event_name,
                'params': ", ".join(['"%s"' % parameter.parameter_name for parameter in event.event_parameters])
            }
            lines.append('InspectorBackend.registerEvent("%(domain)s.%(eventName)s", [%(params)s]);' % event_args)

        for command in domain.commands:

            def generate_parameter_object(parameter):
                optional_string = "true" if parameter.is_optional else "false"
                pairs = []
                pairs.append('"name": "%s"' % parameter.parameter_name)
                pairs.append('"type": "%s"' % Generator.js_name_for_parameter_type(parameter.type))
                pairs.append('"optional": %s' % optional_string)
                return "{%s}" % ", ".join(pairs)

            command_args = {
                'domain': domain.domain_name,
                'commandName': command.command_name,
                'callParams': ", ".join([generate_parameter_object(parameter) for parameter in command.call_parameters]),
                'returnParams': ", ".join(['"%s"' % parameter.parameter_name for parameter in command.return_parameters]),
            }
            lines.append('InspectorBackend.registerCommand("%(domain)s.%(commandName)s", [%(callParams)s], [%(returnParams)s]);' % command_args)

        return "\n".join(lines)
    def __init__(self, collection_in):
        Generator.__init__(self)
        self.collection = collection_in
        self.complex = False
        if isinstance(collection_in, dict):
            self.complex = True

        self.default_gvars = {"contingent": []}
Example #9
    def __init__(self, redis, features=None):
        Generator.__init__(self, redis, features or {})
        self.logger = logging.getLogger(__name__)

        self.name = Name(self.redis, 'street')
Example #10
    def __init__(self, redis, features=None):
        Generator.__init__(self, redis, features or {})
        self.logger = logging.getLogger(__name__)

        if not hasattr(self, 'text'):
            self.text = self.render_template(self.template)
            self.text = self.render_template(self.text)
        self.text = self.text[0].capitalize() + self.text[1:]
Example #11
    def __init__(self, redis, features=None):
        Generator.__init__(self, redis, features or {})
        self.logger = logging.getLogger(__name__)

        self.select_colors()

        self.overlay_stripe_countselected = random.randint(0, int(self.overlay_stripe_count))
        if not hasattr(self, 'letter'):
            self.letter = random.choice(string.ascii_uppercase)
Example #12
 # Nested helper: `self` and `rooms` are captured from the enclosing method's scope.
 def connect(a, b):
     r = rooms[a]
     s = rooms[b]
     edge = list(set(Generator.get_outline(r, eight=False)) &
                 set(Generator.get_outline(s, eight=False)))
     if edge:
         self.doors.append(random.choice(edge))
         self.path[tuple(r)].append(Way(s, self.doors[-1]))
         return True
     return False
Example #13
    def grow_room(self, room, growing, max_size, pad_v=0, pad_h=0, space=None):
        """Tries to grow a room in the specified direction

        Returns whether the growth succeeded"""
        space = space if space is not None else self.interior_space
        for d, grow in enumerate(growing):
            if not grow:
                continue
            if (((d == LEFT or d == RIGHT) and room.w > max_size) or
                    ((d == UP or d == DOWN) and room.h > max_size)):
                growing[d] = False
                continue
            left, top, width, height = room.x, room.y, room.w, room.h
            if d == LEFT:
                left -= 1
                width += 1
                if room.w <= 1:
                    collision = None
                else:
                    collision = Rect(room.x - pad_h, room.y + 1 - pad_v,
                                     1 + pad_h, max(1, room.h + 2 * pad_v - 2))
            elif d == RIGHT:
                width += 1
                if room.w <= 1:
                    collision = None
                else:
                    collision = Rect(room.right - 1 - pad_h, room.y + 1,
                                     1 + pad_h, max(1, room.h + 2 * pad_v - 2))
            elif d == DOWN:
                height += 1
                if room.h <= 1:
                    collision = None
                else:
                    collision = Rect(room.x + 1 - pad_h, room.bottom - 1,
                                     max(1, room.w - 2 + 2 * pad_h), 1 + pad_v)
            elif d == UP:
                top -= 1
                height += 1
                if room.h <= 1:
                    collision = None
                else:
                    collision = Rect(room.x + 1 - pad_h, room.y - pad_v,
                                     max(1, room.w - 2 + 2 * pad_h), 1 + pad_v)
            if collision is not None:
                building_collisions = collision.collidelistall([r.rect for r in self.shapes if isinstance(r, Room)])
            else:
                building_collisions = []
            # Guard added: collision can be None for 1-wide rooms, and
            # get_rect() is assumed to need a real Rect.
            if collision is None or (not (set(Generator.get_rect(collision)) - space) and not building_collisions):
                room.left = left
                room.width = width
                room.top = top
                room.height = height
            else:
                print(room.rect, collision, d, building_collisions, set(Generator.get_rect(collision)) - space)
                growing[d] = False
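
A minimal usage sketch for grow_room; 'gen' and 'room' are hypothetical, and LEFT/RIGHT/UP/DOWN are assumed to be the indices 0-3 this method expects:

# Keep growing in every direction until all four directions are blocked.
growing = [True] * 4
while any(growing):
    gen.grow_room(room, growing, max_size=8, pad_v=1, pad_h=1)
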
Example #14
 def __init__(self, level):
     Generator.__init__(self, level)
     self.size = 80
     # random.randint requires integer bounds (floats raise in Python 3).
     self.width = random.randint(int(self.size * 0.4), int(self.size * 0.6)) * 2
     self.height = random.randint(int(self.size * 0.4), int(self.size * 0.6)) * 2
     self.orientation = random.randint(0, 3)
     self.corridor_size = random.randint(int(self.size * 0.2), int(self.size * 0.3))
     self.room_size = random.randint(4, 6)
     self.randomness = 0  # random.randint(0, 4)
     self.reflect_axis = HORIZONTAL
     self.reflect_centre = 0.5
Example #15
    def __init__(self, redis, features=None):
        Generator.__init__(self, redis, features or {})
        self.logger = logging.getLogger(__name__)

        for person in ['npc', 'victim']:
            if not hasattr(self, person):
                setattr(self, person, NPC(self.redis))

        self.headline = self.render_template(self.headline)
        self.lastseen = self.render_template(self.lastseen)
Example #16
    def __init__(self, redis, features=None):
        Generator.__init__(self, redis, features or {})
        self.logger = logging.getLogger(__name__)

        # Double parse the template to fill in templated template values.

        if not hasattr(self, 'text'):
            self.text = self.render_template(self.template)
            self.text = self.render_template(self.text)
        self.text = self.text[0].capitalize() + self.text[1:]
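
Why render twice: the first pass can substitute a value that itself contains a template tag, which only the second pass resolves. A sketch with hypothetical tags:

#   template:     "The {{npc.role}} seeks {{quest}}"
#   first pass:   "The guard seeks {{item}}"        # quest text contained another tag
#   second pass:  "The guard seeks a silver key"
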
Example #17
    def __init__(self, xml):
        Generator.__init__(self)

        if os.path.isfile(xml):
            plugin_tree = etree.parse(xml)
            plugins = plugin_tree.xpath("/plugins/plugin")

            for plugin in plugins:
                self.__generate_plugin__(plugin, xml)
        else:
            print "XML file: " + xml + "  not valid !"
    def __init__(self, redis, features=None):
        """Generate a Rogue-like dungeon."""
        Generator.__init__(self, redis, features or {})
        self.logger = logging.getLogger(__name__)
        self.generate_features('dungeon')

        self.generate_grid()
        self.generate_rooms()
        self.generate_halls()
        self.name = Name(self.redis, 'dungeon')
Example #19
    def __init__(self, redis, features=None):
        Generator.__init__(self, redis, features or {})
        self.logger = logging.getLogger(__name__)

        self.generate_features('motivation' + self.kind)

        if not hasattr(self, 'npc'):
            self.npc = npc.NPC(self.redis, {'motivation': self})

        self.text = self.render_template(self.text)
    def _generate_open_field_names(self):
        lines = []
        for domain in self.domains_to_generate():
            type_declarations = self.type_declarations_for_domain(domain)
            for type_declaration in [decl for decl in type_declarations if Generator.type_has_open_fields(decl.type)]:
                open_members = Generator.open_fields(type_declaration)
                for type_member in sorted(open_members, key=lambda member: member.member_name):
                    field_name = '::'.join(['Inspector', 'Protocol', domain.domain_name, ucfirst(type_declaration.type_name), ucfirst(type_member.member_name)])
                    lines.append('const char* %s = "%s";' % (field_name, type_member.member_name))

        return '\n'.join(lines)
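
Each emitted line has this shape (domain, type, and member names are hypothetical):

#   const char* Inspector::Protocol::Network::Request::Url = "url";
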
Example #21
    def __init__(self, redis, features=None):
        Generator.__init__(self, redis, features or {})
        self.logger = logging.getLogger(__name__)

        if not hasattr(self, 'regioncount'):
            self.regioncount = random.randint(self.regiondetails['mincount'], self.regiondetails['maxcount'])
        if not hasattr(self, 'leader'):
            self.leader = leader.Leader(self.redis, {"location": self})
            # self.leader = Leader(self.redis)
        if not hasattr(self, 'name'):
            self.name = Name(self.redis, 'country')
Example #22
    def mirror(self, axis, centre):
        """
        Return a mirrored copy of the shape
        @param axis: axis to mirror in
        @param centre: coordinate of mirror axis
        @return: Shape : the mirrored copy
        """

        new = ArbitraryShape(set(Generator.reflect_points(self.points, axis, centre)))
        new.outline_gaps = set(Generator.reflect_points(self.outline_gaps, axis, centre))
        return new
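
A usage sketch for mirror; 'shape' is a hypothetical ArbitraryShape, while HORIZONTAL and a centre of 0.5 match the reflect settings used elsewhere in these examples:

mirrored = shape.mirror(HORIZONTAL, 0.5)   # reflected copy; the original is untouched
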
    def __init__(self, redis, features=None):
        Generator.__init__(self, redis, features or {})
        self.logger = logging.getLogger(__name__)

        if not hasattr(self, 'leader'):
            self.leader = leader.Leader(self.redis, {"location": self})
            # self.leader = Leader(self.redis)

        if not hasattr(self, 'text'):
            self.text = self.render_template(self.template)
        self.name = Name(self.redis, 'organization', {'leader': self.leader})
 def _generate_enum_from_protocol_string(self, objc_enum_name, enum_values):
     lines = []
     lines.append('template<>')
     lines.append('inline %s fromProtocolString(const String& value)' % objc_enum_name)
     lines.append('{')
     for enum_value in enum_values:
         lines.append('    if (value == "%s")' % enum_value)
         lines.append('        return %s%s;' % (objc_enum_name, Generator.stylized_name_for_enum_value(enum_value)))
     lines.append('    ASSERT_NOT_REACHED();')
     lines.append('    return %s%s;' % (objc_enum_name, Generator.stylized_name_for_enum_value(enum_values[0])))
     lines.append('}')
     return '\n'.join(lines)
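
A sketch of the emitted C++ for objc_enum_name='ColorSpace' and enum_values=['srgb', 'p3'] (hypothetical values; the stylized suffixes depend on Generator.stylized_name_for_enum_value):

#   template<>
#   inline ColorSpace fromProtocolString(const String& value)
#   {
#       if (value == "srgb")
#           return ColorSpaceSrgb;
#       if (value == "p3")
#           return ColorSpaceP3;
#       ASSERT_NOT_REACHED();
#       return ColorSpaceSrgb;
#   }
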
Example #26
    def __init__(self, redis, features=None):
        Generator.__init__(self, redis, features or {})
        self.logger = logging.getLogger(__name__)

        for person in ['npc', 'villain']:
            if not hasattr(self, person):
                setattr(self, person, NPC(self.redis))

        if not hasattr(self, 'text'):
            self.text = self.render_template(self.template)
            self.text = self.render_template(self.text)
        self.text = self.text[0].capitalize() + self.text[1:]
Example #27
 def __init__(self, level):
     Generator.__init__(self, level)
     self.size = 50
     # random.randint requires integer bounds (floats raise in Python 3).
     self.width = random.randint(int(self.size * 0.4), int(self.size * 0.6)) * 2
     self.height = random.randint(int(self.size * 0.4), int(self.size * 0.6)) * 2
     self.orientation = random.randint(0, 3)
     self.turretsize = random.randint(6, 10)
     self.gatesize = random.randint(1, 4)
     self.wallwidth = random.randint(3, 4)
     self.turret_project = random.randint(2, min(4, self.turretsize - 3))
     self.gatehouse_project = random.randint(0, 4)
     if self.gatesize % 2 == 1:
         self.width -= 1
Example #28
    def __init__(self, redis, features=None):
        Generator.__init__(self, redis, features or {})
        self.logger = logging.getLogger(__name__)

        for person in ["victim", "culprit", "source", "believer"]:
            if not hasattr(self, person):
                setattr(self, person, NPC(self.redis).name.fullname)

        if not hasattr(self, "text"):
            self.text = self.render_template(self.template)
            self.text = self.render_template(self.text)
        self.text = self.text[0].capitalize() + self.text[1:]
Example #29
    def __init__(self, level, size=80, house_size=9, house_chance=0.5, branchiness=0.8, **params):
        Generator.__init__(self, level)
        self.size = size
        self.road_width = 1
        self.house_size = house_size
        self.house_chance = house_chance
        self.branchiness = branchiness

        self.road_length = house_size * 2
        self.num_roads = size // house_size

        for param, value in params.items():
            setattr(self, param, value)
Example #30
    def __init__(self, redis, features=None):
        Generator.__init__(self, redis, features or {})
        self.logger = logging.getLogger(__name__)

        if not hasattr(self, 'text'):

            # TODO move this to the data file
            # self.text=self.render_template("{{params.quality['name']|article}}
            #               {{params.kind}}, {{params.repair['name']}}")

            self.text = self.render_template(self.template)
        self.text = self.text[0].capitalize() + self.text[1:]
    def _generate_class_for_object_declaration(self, type_declaration, domain):
        if len(type_declaration.type_members) == 0:
            return ''

        # Use lists (not filter objects): required_members is iterated more than once below.
        enum_members = [member for member in type_declaration.type_members
                        if isinstance(member.type, EnumType) and member.type.is_anonymous]
        required_members = [member for member in type_declaration.type_members if not member.is_optional]
        optional_members = [member for member in type_declaration.type_members if member.is_optional]
        object_name = type_declaration.type_name

        lines = []
        if len(type_declaration.description) > 0:
            lines.append('/* %s */' % type_declaration.description)
        base_class = 'Inspector::InspectorObject'
        if not Generator.type_has_open_fields(type_declaration.type):
            base_class = base_class + 'Base'
        lines.append('class %s : public %s {' % (object_name, base_class))
        lines.append('public:')
        for enum_member in enum_members:
            lines.append("    // Named after property name '%s' while generating %s." % (enum_member.member_name, object_name))
            lines.append(self._generate_struct_for_anonymous_enum_member(enum_member))
        lines.append(self._generate_builder_state_enum(type_declaration))

        constructor_example = []
        constructor_example.append('     * Ref<%s> result = %s::create()' % (object_name, object_name))
        for member in required_members:
            constructor_example.append('     *     .set%s(...)' % ucfirst(member.member_name))
        constructor_example.append('     *     .release()')

        builder_args = {
            'objectType': object_name,
            'constructorExample': '\n'.join(constructor_example) + ';',
        }

        lines.append(Template(CppTemplates.ProtocolObjectBuilderDeclarationPrelude).substitute(None, **builder_args))
        for type_member in required_members:
            lines.append(self._generate_builder_setter_for_member(type_member, domain))
        lines.append(Template(CppTemplates.ProtocolObjectBuilderDeclarationPostlude).substitute(None, **builder_args))
        for member in optional_members:
            lines.append(self._generate_unchecked_setter_for_member(member, domain))

        if Generator.type_has_open_fields(type_declaration.type):
            lines.append('')
            lines.append('    // Property names for type generated as open.')
            export_macro = self.model().framework.setting('export_macro', None)
            for type_member in type_declaration.type_members:
                lines.append('    %s static const char* %s;' % (export_macro, ucfirst(type_member.member_name)))

        lines.append('};')
        lines.append('')
        return '\n'.join(lines)
Example #32
    def __init__(self,
                 X_train_file='',
                 Y_train_file='',
                 batch_size=1,
                 image_size=256,
                 use_lsgan=True,
                 norm='instance',
                 lambda1=10.0,
                 lambda2=10.0,
                 learning_rate=2e-4,
                 beta1=0.5,
                 ngf=64):
        """
    Args:
      X_train_file: string, X tfrecords file for training
      Y_train_file: string Y tfrecords file for training
      batch_size: integer, batch size
      image_size: integer, image size
      lambda1: integer, weight for forward cycle loss (X->Y->X)
      lambda2: integer, weight for backward cycle loss (Y->X->Y)
      use_lsgan: boolean
      norm: 'instance' or 'batch'
      learning_rate: float, initial learning rate for Adam
      beta1: float, momentum term of Adam
      ngf: number of gen filters in first conv layer
    """
        self.lambda1 = lambda1
        self.lambda2 = lambda2
        self.use_lsgan = use_lsgan
        use_sigmoid = not use_lsgan
        self.batch_size = batch_size
        self.image_size = image_size
        self.learning_rate = learning_rate
        self.beta1 = beta1
        self.X_train_file = X_train_file
        self.Y_train_file = Y_train_file

        self.is_training = tf.placeholder_with_default(True,
                                                       shape=[],
                                                       name='is_training')

        self.G = Generator('G',
                           self.is_training,
                           ngf=ngf,
                           norm=norm,
                           image_size=image_size)
        self.D_Y = Discriminator('D_Y',
                                 self.is_training,
                                 norm=norm,
                                 use_sigmoid=use_sigmoid)
        self.F = Generator('F',
                           self.is_training,
                           norm=norm,
                           image_size=image_size)
        self.D_X = Discriminator('D_X',
                                 self.is_training,
                                 norm=norm,
                                 use_sigmoid=use_sigmoid)

        self.fake_x = tf.placeholder(
            tf.float32, shape=[batch_size, image_size, image_size, 3])
        self.fake_y = tf.placeholder(
            tf.float32, shape=[batch_size, image_size, image_size, 3])
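
A construction sketch; the enclosing class name is not shown in this excerpt, so 'CycleGAN' and the tfrecords paths are assumptions:

model = CycleGAN(X_train_file='data/apple.tfrecords',
                 Y_train_file='data/orange.tfrecords',
                 batch_size=1, image_size=256,
                 use_lsgan=True, norm='instance')
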
Example #33
 def __init__(self, file_path):
     self.tokenizer = Tokenizer(file_path)
     self.parser = Parser()
     self.generator = Generator()
     self.env = Environment()
     self.interpreted_file_name = None
Example #34
def main(_):
    # set useful shortcuts and initialize timing
    tf.logging.set_verbosity(tf.logging.INFO)
    log_dir = FLAGS.logdir
    start_time = time.time()

    # load dataset
    dataset = Dataset(bs=FLAGS.target_batch_size,
                      filepattern=FLAGS.filepattern)

    with tf.Graph().as_default():
        generator = Generator(FLAGS.batch_size, FLAGS.noise_dim)
        wasserstein = Wasserstein(generator, dataset)

        # create optimization problem to solve (adversarial-free GAN)
        loss = wasserstein.dist(C=0.1, nsteps=10)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        adam = tf.train.AdamOptimizer(FLAGS.learning_rate, FLAGS.momentum)
        train_step = adam.minimize(loss, global_step=global_step)

        # add summaries for tensorboard
        tf.summary.scalar('loss', loss)
        wasserstein.add_summary_images(num=9)
        all_summaries = tf.summary.merge_all()

        saver = tf.train.Saver()
        init = tf.global_variables_initializer()

        sm = tf.train.SessionManager()
        config = tf.ConfigProto()
        try:
            sess = sm.prepare_session('',
                                      init_op=init,
                                      saver=saver,
                                      config=config,
                                      checkpoint_dir=log_dir)
        except tf.errors.InvalidArgumentError:
            tf.logging.info('Cannot load old session. Starting new one.')
            sess = tf.Session(config=config)
            sess.run(init)

        # output graph for early inspection
        test_writer = tf.summary.FileWriter(log_dir, graph=sess.graph)
        test_writer.flush()

        summary, current_loss = sess.run([all_summaries, loss])
        test_writer.add_summary(summary)
        test_writer.flush()

        sys_stdout_flush('Time to start training %f s\n' %
                         (time.time() - start_time))
        start_time = time.time()
        iteration_time = time.time()

        for i in range(FLAGS.num_steps):
            if i % 10 == 0:
                sys_stdout_flush('.')
            if i % 100 == 0:  # record summaries
                summary, current_loss, step = sess.run(
                    [all_summaries, loss, global_step])
                test_writer.add_summary(summary, i)
                test_writer.flush()

                sys_stdout_flush('Step %d[%d] (%s): loss %f ' %
                                 (i, step, time.ctime(), current_loss))
                sys_stdout_flush('iteration: %f s ' %
                                 (time.time() - iteration_time))
                iteration_time = time.time()
                sys_stdout_flush('total: %f s ' % (time.time() - start_time))
                sys_stdout_flush('\n')

            sess.run(train_step)
            if i % 1000 == 0:
                sys_stdout_flush('Saving snapshot.\n')
                saver.save(sess,
                           os.path.join(log_dir, 'wasserstein.ckpt'),
                           global_step=global_step)

        saver.save(sess, os.path.join(log_dir, 'wasserstein-final.ckpt'))
        sys_stdout_flush('Done.\n')
        test_writer.close()
Example #35
def train(args):
    # Context
    ctx = get_extension_context(args.context,
                                device_id=args.device_id,
                                type_config=args.type_config)
    nn.set_default_context(ctx)

    aug_list = args.aug_list

    # Model
    scope_gen = "Generator"
    scope_dis = "Discriminator"
    # generator loss
    z = nn.Variable([args.batch_size, args.latent, 1, 1])
    x_fake = Generator(z, scope_name=scope_gen, img_size=args.image_size)
    p_fake = Discriminator([augment(xf, aug_list) for xf in x_fake],
                           label="fake",
                           scope_name=scope_dis)
    lossG = loss_gen(p_fake)
    # discriminator loss
    x_real = nn.Variable(
        [args.batch_size, 3, args.image_size, args.image_size])
    x_real_aug = augment(x_real, aug_list)
    p_real, rec_imgs, part = Discriminator(x_real_aug,
                                           label="real",
                                           scope_name=scope_dis)
    lossD_fake = loss_dis_fake(p_fake)
    lossD_real = loss_dis_real(p_real, rec_imgs, part, x_real_aug)
    lossD = lossD_fake + lossD_real
    # generator with fixed latent values for test
    # Use train=True even in an inference phase
    z_test = nn.Variable.from_numpy_array(
        np.random.randn(args.batch_size, args.latent, 1, 1))
    x_test = Generator(z_test,
                       scope_name=scope_gen,
                       train=True,
                       img_size=args.image_size)[0]

    # Exponential Moving Average (EMA) model
    # Use train=True even in an inference phase
    scope_gen_ema = "Generator_EMA"
    x_test_ema = Generator(z_test,
                           scope_name=scope_gen_ema,
                           train=True,
                           img_size=args.image_size)[0]
    copy_params(scope_gen, scope_gen_ema)
    update_ema_var = make_ema_updater(scope_gen_ema, scope_gen, 0.999)

    # Solver
    solver_gen = S.Adam(args.lr, beta1=0.5)
    solver_dis = S.Adam(args.lr, beta1=0.5)
    with nn.parameter_scope(scope_gen):
        params_gen = nn.get_parameters()
        solver_gen.set_parameters(params_gen)
    with nn.parameter_scope(scope_dis):
        params_dis = nn.get_parameters()
        solver_dis.set_parameters(params_dis)

    # Monitor
    monitor = Monitor(args.monitor_path)
    monitor_loss_gen = MonitorSeries("Generator Loss", monitor, interval=10)
    monitor_loss_dis_real = MonitorSeries("Discriminator Loss Real",
                                          monitor,
                                          interval=10)
    monitor_loss_dis_fake = MonitorSeries("Discriminator Loss Fake",
                                          monitor,
                                          interval=10)
    monitor_time = MonitorTimeElapsed("Training Time", monitor, interval=10)
    monitor_image_tile_train = MonitorImageTile("Image Tile Train", monitor,
                                                num_images=args.batch_size, interval=1,
                                                normalize_method=lambda x: (x + 1.) / 2.)
    monitor_image_tile_test = MonitorImageTile("Image Tile Test", monitor,
                                               num_images=args.batch_size, interval=1,
                                               normalize_method=lambda x: (x + 1.) / 2.)
    monitor_image_tile_test_ema = MonitorImageTile("Image Tile Test EMA", monitor,
                                                   num_images=args.batch_size, interval=1,
                                                   normalize_method=lambda x: (x + 1.) / 2.)

    # Data Iterator
    rng = np.random.RandomState(141)
    di = data_iterator(args.img_path,
                       args.batch_size,
                       imsize=(args.image_size, args.image_size),
                       num_samples=args.train_samples,
                       rng=rng)

    # Train loop
    for i in range(args.max_iter):
        # Train discriminator
        x_fake[0].need_grad = False  # no need backward to generator
        x_fake[1].need_grad = False  # no need backward to generator
        solver_dis.zero_grad()
        x_real.d = di.next()[0]
        z.d = np.random.randn(args.batch_size, args.latent, 1, 1)
        lossD.forward()
        lossD.backward()
        solver_dis.update()

        # Train generator
        x_fake[0].need_grad = True  # need backward to generator
        x_fake[1].need_grad = True  # need backward to generator
        solver_gen.zero_grad()
        lossG.forward()
        lossG.backward()
        solver_gen.update()

        # Update EMA model
        update_ema_var.forward()

        # Monitor
        monitor_loss_gen.add(i, lossG.d)
        monitor_loss_dis_real.add(i, lossD_real.d)
        monitor_loss_dis_fake.add(i, lossD_fake.d)
        monitor_time.add(i)

        # Save
        if (i + 1) % args.save_interval == 0:
            with nn.parameter_scope(scope_gen):
                nn.save_parameters(
                    os.path.join(args.monitor_path,
                                 "Gen_iter{}.h5".format(i + 1)))
            with nn.parameter_scope(scope_gen_ema):
                nn.save_parameters(
                    os.path.join(args.monitor_path,
                                 "GenEMA_iter{}.h5".format(i + 1)))
            with nn.parameter_scope(scope_dis):
                nn.save_parameters(
                    os.path.join(args.monitor_path,
                                 "Dis_iter{}.h5".format(i + 1)))
        if (i + 1) % args.test_interval == 0:
            x_test.forward(clear_buffer=True)
            x_test_ema.forward(clear_buffer=True)
            monitor_image_tile_train.add(i + 1, x_fake[0])
            monitor_image_tile_test.add(i + 1, x_test)
            monitor_image_tile_test_ema.add(i + 1, x_test_ema)

    # Last
    x_test.forward(clear_buffer=True)
    x_test_ema.forward(clear_buffer=True)
    monitor_image_tile_train.add(args.max_iter, x_fake[0])
    monitor_image_tile_test.add(args.max_iter, x_test)
    monitor_image_tile_test_ema.add(args.max_iter, x_test_ema)
    with nn.parameter_scope(scope_gen):
        nn.save_parameters(
            os.path.join(args.monitor_path,
                         "Gen_iter{}.h5".format(args.max_iter)))
    with nn.parameter_scope(scope_gen_ema):
        nn.save_parameters(
            os.path.join(args.monitor_path,
                         "GenEMA_iter{}.h5".format(args.max_iter)))
    with nn.parameter_scope(scope_dis):
        nn.save_parameters(
            os.path.join(args.monitor_path,
                         "Dis_iter{}.h5".format(args.max_iter)))
Example #36
    # x = K.layers.ReLU(6., name='pre_encoding')(x)
    x = Reshape((ENCODER_SHAPE,), name='encoding')(x)
    preds = CosineSoftmax(output_dim=out_shape)(x)
    model = Model(inputs=base_model.inputs, outputs=preds)
    # for layer in base_model.layers:
    #         layer.trainable = False

    if model_to_restore is not None:
        model.load_weights(model_to_restore)

    return model


if __name__ == '__main__':
    generator = Generator(IN_DIR, BATCH_SIZE, TARGET_SIZE[1], TARGET_SIZE[0],
                          val_to_train=0.15, preprocessor=preprocess_input,
                          augmenter=augm_hard.augment_images)  # augm.augment_images)
    model = create_model_mobilenet(MODEL_RESTORE, (TARGET_SIZE[0], TARGET_SIZE[1], 3), generator.cats_num)

    if MODEL_RESTORE is not None:
        optimizer = K.optimizers.RMSprop(lr=0.0001, decay=0.03)
    else:
        optimizer = K.optimizers.Adam(lr=0.0001)
        # optimizer = K.optimizers.RMSprop(lr=0.002, decay=0.01)

    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()
    with open(os.path.join(os.path.normpath(MODEL_OUT_DIR), "model_13_v2.json"), "w") as out:
        out.write(model.to_json())
 def domains_to_generate(self):
     return list(filter(self.should_generate_types_for_domain, Generator.domains_to_generate(self)))
            one_hot = one_hot.cuda()
        loss = torch.masked_select(prob, one_hot)
        loss = loss * reward
        loss = -torch.sum(loss)
        return loss


target_lstm = TargetLSTM(VOCAB_SIZE, g_emb_dim, g_hidden_dim, opt.cuda)
if opt.cuda:
    target_lstm = target_lstm.cuda()
# Generate toy data using target lstm
print('Generating data ...')
generate_samples(target_lstm, BATCH_SIZE, GENERATED_NUM, POSITIVE_FILE)
# Load data from file
gen_data_iter = GenDataIter(POSITIVE_FILE, BATCH_SIZE)
original_generator = Generator(VOCAB_SIZE, g_emb_dim, g_hidden_dim, opt.cuda)


def main(generator_, PRE_EPOCH_NUM):
    #random.seed(SEED)
    #np.random.seed(SEED)

    perf_dict = {}
    true_loss = []
    generator_loss = []
    disc_loss = []
    nb_batch_per_epoch = int(GENERATED_NUM / BATCH_SIZE)

    # Define Networks
    generator = copy.deepcopy(generator_)
    discriminator = Discriminator(d_num_class, VOCAB_SIZE, d_emb_dim, d_filter_sizes, d_num_filters, d_dropout)
Example #39
if "gen" in args:
    generate_train_data()
    generate_test_data()

if "gen-train" in args:
    generate_train_data()
if "gen-test" in args:
    generate_test_data()

if "train" in args:
    train()
    test()

if "mock-faulty" in args:
    f = args[args.index("mock-faulty") + 1]
    gen = Generator(f, is_mock=True)
    gen.generate_mock_csv(is_faulty=True)

if "mock-reliable" in args:
    f = args[args.index("mock-reliable") + 1]
    gen = Generator(f, is_mock=True)
    gen.generate_mock_csv(is_faulty=False)

if "predict" in args:
    file_to_predict = args[args.index("predict") + 1]
    if os.path.exists("data/decision-tree.pkl"):
        cart.load_tree()
        cart.predict(file_to_predict)
    else:
        program_error("Gere um modelo antes")
Example #40

if __name__ == '__main__':
    # Parse arguments
    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)
    if not args.hpc:
        args.data_path = ''
    POSITIVE_FILE = args.data_path + POSITIVE_FILE
    NEGATIVE_FILE = args.data_path + NEGATIVE_FILE

    # Set models, criteria, optimizers
    generator = Generator(args.vocab_size, g_embed_dim, g_hidden_dim,
                          args.cuda)
    discriminator = Discriminator(d_num_class, args.vocab_size, d_embed_dim,
                                  d_filter_sizes, d_num_filters,
                                  d_dropout_prob)
    target_lstm = TargetLSTM(args.vocab_size, g_embed_dim, g_hidden_dim,
                             args.cuda)
    nll_loss = nn.NLLLoss()
    pg_loss = PGLoss()
    if args.cuda:
        generator = generator.cuda()
        discriminator = discriminator.cuda()
        target_lstm = target_lstm.cuda()
        nll_loss = nll_loss.cuda()
        pg_loss = pg_loss.cuda()
        cudnn.benchmark = True
    gen_optimizer = optim.Adam(params=generator.parameters(), lr=args.gen_lr)
Example #41
opt = parser.parse_args()
print(opt)

# Create sample and checkpoint directories
os.makedirs('images/%s' % opt.dataset_name, exist_ok=True)
os.makedirs('saved_models/%s' % opt.dataset_name, exist_ok=True)

# Losses
criterion_GAN = nn.MSELoss()
criterion_cycle = nn.L1Loss()
criterion_identity = nn.L1Loss()
criterion_similar = nn.L1Loss()

# Initialize generator and discriminator
# G = GeneratorResNet(res_blocks=opt.n_residual_blocks)
G = Generator(res_blocks=opt.n_residual_blocks)
D_A = Discriminator()
D_B = Discriminator()

# if GPU is accessible
cuda = True if torch.cuda.is_available() else False
if cuda:
    G = G.cuda()
    D_A = D_A.cuda()
    D_B = D_B.cuda()
    criterion_GAN.cuda()
    criterion_cycle.cuda()
    criterion_identity.cuda()
    criterion_similar.cuda()

# Optimizers
Example #42
 def domains_to_generate(self):
     # Wrap in list() for Python 3, matching the other domains_to_generate above.
     return list(filter(ObjCGenerator.should_generate_domain_command_handler_filter(self.model()), Generator.domains_to_generate(self)))
Example #43
    def __init__(self,
                 main_dir=None,
                 CaseStudy=0,
                 seq=3,
                 ndraw=10000,
                 thin=1,
                 nCR=3,
                 DEpairs=3,
                 parallelUpdate=1.0,
                 pCR=True,
                 k=10,
                 pJumpRate_one=0.2,
                 steps=100,
                 savemodout=False,
                 saveout=True,
                 save_tmp_out=True,
                 Prior='LHS',
                 DoParallel=True,
                 eps=5e-2,
                 BoundHandling='Reflect',
                 lik_sigma_est=False,
                 parallel_jobs=4,
                 jr_scale=1.0,
                 rng_seed=123,
                 it_b=10,
                 jr_factor=0.2,
                 CESSf_div=0.999993,
                 ESSf_div=0.5,
                 AR_min=25.0):

        self.CaseStudy = CaseStudy
        MCMCPar.seq = seq
        MCMCPar.ndraw = ndraw
        MCMCPar.thin = thin
        MCMCPar.nCR = nCR
        MCMCPar.DEpairs = DEpairs
        MCMCPar.parallelUpdate = parallelUpdate
        MCMCPar.Do_pCR = pCR
        MCMCPar.k = k
        MCMCPar.pJumpRate_one = pJumpRate_one
        MCMCPar.steps = steps
        MCMCPar.savemodout = savemodout
        MCMCPar.saveout = saveout
        MCMCPar.save_tmp_out = save_tmp_out
        MCMCPar.Prior = Prior
        MCMCPar.DoParallel = DoParallel
        MCMCPar.eps = eps
        MCMCPar.BoundHandling = BoundHandling
        MCMCPar.jr_scale = jr_scale
        MCMCPar.lik_sigma_est = lik_sigma_est
        Extra.n_jobs = parallel_jobs
        Extra.main_dir = main_dir
        np.random.seed(seed=None)
        MCMCPar.rng_seed = rng_seed
        MCMCPar.it_b = it_b
        MCMCPar.jr_factor = jr_factor
        MCMCPar.AR_min = AR_min
        MCMCPar.ESSf_div = ESSf_div
        MCMCPar.CESSf_div = CESSf_div

        if self.CaseStudy == 1:

            ModelName = 'nonlinear_gpr_tomo'
            MCMCPar.lik = 2

            MCMCPar.savemodout = True
            MCMCPar.lik_sigma_est = False

            MCMCPar.lb = np.ones((1, 15)) * -1
            MCMCPar.ub = np.ones((1, 15))
            MCMCPar.n = MCMCPar.ub.shape[1]

            # Forward model:
            # x is the horizontal axis (number of columns); y is the vertical axis (number of rows).
            finefac = 1  # refinement factor; not working for channelized models
            spacing = 0.1 / finefac
            nx = int(60 * finefac)   # np.int is deprecated; plain int is equivalent here
            ny = int(125 * finefac)
            # The x-axis is varying the fastest
            sourcex = 0.1
            sourcez = np.arange(0.5, 12 + 0.5, 0.5)  # source positions in meters
            receiverx = 5.9
            receiverz = np.arange(0.5, 12 + 0.5, 0.5)  # receiver positions in meters
            xs = np.float32(sourcex / spacing)  # source x-position in model domain coordinates
            ys = sourcez / spacing  # divided by the spacing to get the domain coordinate
            rx = receiverx / spacing  # receiver x-position in model domain coordinates
            rz = receiverz / spacing  # divided by the spacing to get the domain coordinate
            nsource = len(sourcez)
            nreceiver = len(receiverz)
            ndata = nsource * nreceiver
            data = np.zeros((ndata, 4))
            x = np.arange(0, (nx / 10) + 0.1, 0.1)
            y = np.arange(0, (ny / 10) + 0.1, 0.1)

            # Neural network (SGAN):
            DNN = AttrDict()
            DNN.nx = nx
            DNN.ny = ny
            DNN.zx = 5
            DNN.zy = 3
            DNN.nc = 1
            DNN.nz = 1
            DNN.depth = 5
            DNN.threshold = True
            DNN.filtering = False
            DNN.cuda = False

            DNN.gpath = Extra.main_dir + '/netG.pth'

            from generator import Generator

            DNN.npx = (DNN.zx - 1) * 2**DNN.depth + 1
            DNN.npy = (DNN.zy - 1) * 2**DNN.depth + 1
            DNN.netG = Generator(cuda=DNN.cuda, gpath=DNN.gpath)
            for param in DNN.netG.parameters():
                param.requires_grad = False
            DNN.netG.eval()
            if DNN.cuda:
                DNN.netG.cuda()
            self.DNN = DNN

            # Load measurements

            Measurement.Sigma = 1  # Measurement error is 1 ns

            Measurement.MeasData = np.load('datatruemodel_sigma1.npy')

            #Filter travel times with source-receiver angle > 45 degrees
            source = []
            for i in range(1, 25):
                s = np.ones(24) * (0.5 * i)
                source = np.concatenate((source, s))

            rec = np.linspace(0.5, 12, 24)
            receiver = []

            for ii in range(1, 25):
                receiver = np.concatenate((receiver, rec))

            dist = np.abs(source - receiver)
            ind = np.where(dist > 6)[0]

            Measurement.MeasData = np.delete(Measurement.MeasData, ind)
            Measurement.N = len(Measurement.MeasData)

            # Define the beta list to choose from in the binary search,
            # to create an adaptive beta sequence.
            betanum = 500
            beta_list = np.zeros(betanum - 1)
            for i in range(1, betanum):
                beta_list[i - 1] = i * 2 * 10**-5

        MCMCPar.m0 = 10 * MCMCPar.n

        self.MCMCPar = MCMCPar
        self.Measurement = Measurement
        self.Extra = Extra
        self.ModelName = ModelName
Example #44
def main():
    port = serial.Serial(
        '/dev/ttyUSB0', 115200, parity=serial.PARITY_EVEN
    )  # note: the first FPGA version of the generator does not check parity; the parity bit is just read and discarded (as of 2018-04-08)
    # port_ver = serial.Serial('/dev/ttyUSB2', 115200, parity=serial.PARITY_EVEN, timeout=1)  # same parity note as above

    # get the curses screen window
    screen = curses.initscr()
    curses.start_color()
    curses.use_default_colors()
    # turn off input echoing
    curses.noecho()
    # respond to keys immediately (don't wait for enter)
    curses.cbreak()
    # map arrow keys to special values
    screen.keypad(True)

    # initialize screen colors
    for i in range(0, curses.COLORS):
        curses.init_pair(i + 1, i, -1)

    f_4seq = [
        lambda i, val: val if i % 2 == 0 else 0,
        lambda i, val: val if i % 3 == 0 else 0,
        lambda i, val: val if i % 14 == 0 else 0,
        lambda i, val: val if i % 28 == 0 else 0,
        lambda i, val: val if i % 56 == 0 else 0,
    ]

    gen = Generator(port)
    gen.set_frequency(300e3)
    # gen.set_frequency(40e3)

    rec = Receiver(
        30001, {
            "56H": lambda phases: updatePhases(gen, screen, list(phases)),
        }, lambda data: parsePhases(gen, screen, data))
    rec.start()

    try:
        i = 0
        j = 0
        while True:
            phases = [f_4seq[j](i + k, 180) for k in range(4 * 14)]
            # duty_cycles = [f_4seq[j](i + k, 0.5) for k in range(4 * 14)]

            updatePhases(gen, screen, phases)

            # screen.refresh()

            char = screen.getch()
            if char == ord('q'):
                break
            elif char == curses.KEY_RIGHT:
                i -= 1
            elif char == curses.KEY_LEFT:
                i += 1
            elif char == curses.KEY_UP:
                j = (j + 1) % len(f_4seq)
            elif char == curses.KEY_DOWN:
                j = (j - 1) % len(f_4seq)

            # input("Press Enter to continue...")
            # sleep(0.02)

    finally:
        # shut down cleanly
        gen.set([0] * 64, [0.0] * 64)
        curses.nocbreak()
        screen.keypad(0)
        curses.echo()
        curses.endwin()
Example #45
from pytorch_lightning import Trainer

from discriminator import Discriminator
from faces_data_module import FacesDataModule
from generator import Generator
from wgan import WGAN

if __name__ == "__main__":
    data_module = FacesDataModule()
    wgan = WGAN(generator=Generator(), discriminator=Discriminator())

    trainer = Trainer(automatic_optimization=False)
    trainer.fit(wgan, data_module)
Example #46
        dest="with_gpx",
        action="store_true",
        help="get all keep data to gpx and download",
    )
    parser.add_argument(
        "--from-uid-sid",
        dest="from_uid_sid",
        action="store_true",
        help="from uid and sid for download datas",
    )
    options = parser.parse_args()
    if options.from_uid_sid:
        j = Joyrun.from_uid_sid(
            uid=str(options.phone_number_or_uid),
            sid=str(options.identifying_code_or_sid),
        )
    else:
        j = Joyrun(
            user_name=str(options.phone_number_or_uid),
            identifying_code=str(options.identifying_code_or_sid),
        )
        j.login_by_phone()

    generator = Generator(SQL_FILE)
    old_tracks_ids = generator.get_old_tracks_ids()
    tracks = j.get_all_joyrun_tracks(old_tracks_ids, options.with_gpx)
    generator.sync_from_app(tracks)
    activities_list = generator.load()
    with open(JSON_FILE, "w") as f:
        json.dump(activities_list, f)
Example #47
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.imshow(sample)

    return fig


# Load data.
print("==> Loading data...")
trainloader, testloader = data_loader_and_transformer(args.batch_size)

# Load model.
print("==> Initializing model...")

aD = Discriminator()
aG = Generator()

if args.load_checkpoint:
    print("==> Loading checkpoint...")
    aD = torch.load('tempD.model')
    aG = torch.load('tempG.model')

aD.cuda()
aG.cuda()

# Optimizers, one for each model.
optimizer_g = torch.optim.Adam(aG.parameters(), lr=0.0001, betas=(0, 0.9))
optimizer_d = torch.optim.Adam(aD.parameters(), lr=0.0001, betas=(0, 0.9))

criterion = nn.CrossEntropyLoss()
Example #48
        # System-Level analyses
        pass_manager.enqueue_analysis("SymbolicSystemExecution")
        pass_manager.enqueue_analysis("SystemStateFlow")

        global_cfg = pass_manager.enqueue_analysis("ConstructGlobalCFG")
        global_abb_information = global_cfg.global_abb_information_provider()
        logging.info("Global control flow information is provided by %s",
                     global_abb_information.name())
        syscall_rules = SpecializedSystemCalls(global_abb_information)
    else:
        syscall_rules = FullSystemCalls()

    # From command line
    additional_passes = options.additional_passes

    for each in additional_passes:
        P = pass_manager.get_pass(each)
        if not P:
            panic("No such compiler pass: %s", each)
        pass_manager.enqueue_analysis(P)

    pass_manager.analyze("%s/gen_" % (options.prefix))

    generator = Generator.Generator(graph, options.name, arch_rules, os_rules,
                                    syscall_rules)

    generator.template_base = options.template_base
    generator.generate_into(options.prefix)

    graph.stats.save(options.prefix + "/stats.dict.py")
Example #49
 def __init__(self, trucks, cities):
     self.trucksM, self.trucks, self.cities = Generator.generateGA(
         trucks, cities)
     self.results = []
Example #50
 def __init__(self, swagger):
     Generator.__init__(self, swagger)
Example #51
    def _generate_assertion_for_object_declaration(self, object_declaration):
        required_members = filter(lambda member: not member.is_optional,
                                  object_declaration.type_members)
        optional_members = filter(lambda member: member.is_optional,
                                  object_declaration.type_members)
        should_count_properties = not Generator.type_has_open_fields(
            object_declaration.type)
        lines = []

        lines.append('#if !ASSERT_DISABLED')
        lines.append(
            'void BindingTraits<%s>::assertValueHasExpectedType(Inspector::InspectorValue* value)'
            %
            (CppGenerator.cpp_protocol_type_for_type(object_declaration.type)))
        lines.append("""{
    ASSERT_ARG(value, value);
    RefPtr<InspectorObject> object;
    bool castSucceeded = value->asObject(object);
    ASSERT_UNUSED(castSucceeded, castSucceeded);""")
        for type_member in required_members:
            args = {
                'memberName':
                type_member.member_name,
                'assertMethod':
                CppGenerator.cpp_assertion_method_for_type_member(
                    type_member, object_declaration)
            }

            lines.append("""    {
        InspectorObject::iterator %(memberName)sPos = object->find(ASCIILiteral("%(memberName)s"));
        ASSERT(%(memberName)sPos != object->end());
        %(assertMethod)s(%(memberName)sPos->value.get());
    }""" % args)

        if should_count_properties:
            lines.append('')
            lines.append('    int foundPropertiesCount = %s;' %
                         len(required_members))

        for type_member in optional_members:
            args = {
                'memberName':
                type_member.member_name,
                'assertMethod':
                CppGenerator.cpp_assertion_method_for_type_member(
                    type_member, object_declaration)
            }

            lines.append("""    {
        InspectorObject::iterator %(memberName)sPos = object->find(ASCIILiteral("%(memberName)s"));
        if (%(memberName)sPos != object->end()) {
            %(assertMethod)s(%(memberName)sPos->value.get());""" % args)

            if should_count_properties:
                lines.append('            ++foundPropertiesCount;')
            lines.append('        }')
            lines.append('    }')

        if should_count_properties:
            lines.append('    if (foundPropertiesCount != object->size())')
            lines.append(
                '        FATAL("Unexpected properties in object: %s\\n", object->toJSONString().ascii().data());'
            )
        lines.append('}')
        lines.append('#endif // !ASSERT_DISABLED')
        return '\n'.join(lines)
Example #52
def main():
    parser = argparse.ArgumentParser(description='Chainer: DCGAN MNIST')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=50,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=1000,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--n_hidden',
                        '-n',
                        type=int,
                        default=100,
                        help='Number of hidden units (z)')
    parser.add_argument('--seed',
                        type=int,
                        default=0,
                        help='Random seed of z at visualization stage')
    parser.add_argument('--snapshot_interval',
                        type=int,
                        default=1000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval',
                        type=int,
                        default=100,
                        help='Interval of displaying log to console')
    parser.add_argument('--image_dir',
                        default='./img',
                        help='Training image directory')

    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# n_hidden: {}'.format(args.n_hidden))
    print('# epoch: {}'.format(args.epoch))
    print('')

    gen = Generator()
    dis = Discriminator()

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        gen.to_gpu()
        dis.to_gpu()

    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001), 'hook_dec')
        return optimizer

    opt_gen = make_optimizer(gen)
    opt_dis = make_optimizer(dis)

    def read_dataset(image_dir):
        fs = os.listdir(image_dir)
        dataset = []
        for fn in fs:
            # Use the function argument rather than reaching back to args.image_dir.
            img = Image.open(f"{image_dir}/{fn}").convert('RGB')
            dataset.append((np.asarray(img,
                                       dtype=np.float32).transpose(2, 0, 1)))
        return dataset

    dataset = read_dataset(args.image_dir)
    train_iter = chainer.iterators.SerialIterator(dataset,
                                                  args.batchsize,
                                                  repeat=True,
                                                  shuffle=True)

    updater = DCGANUpdater(models=(gen, dis),
                           iterator=train_iter,
                           optimizer={
                               'gen': opt_gen,
                               'dis': opt_dis
                           },
                           device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    epoch_interval = (1, 'epoch')
    # snapshot_interval = (args.snapshot_interval, 'iteration')
    display_interval = (args.display_interval, 'iteration')
    # trainer.extend(extensions.snapshot(filename='snapshot_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
    # trainer.extend(extensions.snapshot_object(gen, 'gen_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
    # trainer.extend(extensions.snapshot_object(dis, 'dis_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
    trainer.extend(
        extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}.npz'),
        trigger=epoch_interval)
    trainer.extend(extensions.snapshot_object(
        gen, 'gen_epoch_{.updater.epoch}.npz'),
                   trigger=epoch_interval)
    trainer.extend(extensions.snapshot_object(
        dis, 'dis_epoch_{.updater.epoch}.npz'),
                   trigger=epoch_interval)
    trainer.extend(extensions.LogReport(trigger=display_interval))
    trainer.extend(extensions.PrintReport([
        'epoch',
        'iteration',
        'gen/loss',
        'dis/loss',
    ]),
                   trigger=display_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(out_generated_image(gen, dis, 10, 10, args.seed, args.out),
                   trigger=epoch_interval)

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
Example #53
def main():
    # load rhyme table
    table = np.load("./data/table.npy")
    np.random.seed(SEED)
    random.seed(SEED)

    # data loader
    # gen_data_loader = Gen_Data_loader(BATCH_SIZE)
    input_data_loader = Input_Data_loader(BATCH_SIZE)
    dis_data_loader = Dis_dataloader(BATCH_SIZE)

    D = Discriminator(SEQ_LENGTH, num_class, vocab_size, dis_emb_size, dis_filter_sizes, dis_num_filters, 0.2)
    G = Generator(vocab_size, BATCH_SIZE, EMB_DIM, HIDDEN_DIM, SEQ_LENGTH, START_TOKEN, table, has_input=True)

    # avoid occupy all the memory of the GPU
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())
    # savers for different models
    saver_gen = tf.train.Saver()
    saver_dis = tf.train.Saver()
    saver_seqgan = tf.train.Saver()
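    # Separate savers let the pretrained generator, pretrained discriminator,
    # and the adversarial model be checkpointed and restored independently.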

    # gen_data_loader.create_batches(positive_file)
    input_data_loader.create_batches(x_file, y_file)
    log = open('./experiment-log.txt', 'w')
    #  pre-train generator
    if pre_train_gen_path:
        print("loading pretrain generator model...")
        log.write("loading pretrain generator model...")
        restore_model(G, sess, saver_gen, pre_train_gen_path)
        print("loaded")
    else:
        log.write('pre-training generator...\n')
        print('Start pre-training...')
        best = float('inf')  # best pre-training generator loss seen so far
        for epoch in range(PRE_GEN_NUM):
            s = time.time()
            # loss = pre_train_epoch(sess, G, gen_data_loader)
            loss = pre_train_epoch(sess, G, input_data_loader)
            print("Epoch ", epoch, " loss: ", loss)
            log.write("Epoch:\t" + str(epoch) + "\tloss:\t" + str(loss) + "\n")
            print("pre-train generator epoch time: ", time.time() - s, " s")
            if loss < best:
                saver_gen.save(sess, "./model/pre_gen/pretrain_gen_best")
                best = loss
    dev_loader = Input_Data_loader(BATCH_SIZE)
    dev_loader.create_batches(dev_x, dev_y)

    if pre_train_dis_path:
        print("loading pretrain discriminator model...")
        log.write("loading pretrain discriminator model...")
        restore_model(D, sess, saver_dis, pre_train_dis_path)
        print("loaded")
    else:
        log.write('pre-training discriminator...\n')
        print("Start pre-train the discriminator")
        s = time.time()
        best = 0  # best pre-training discriminator accuracy seen so far
        for epoch in range(PRE_DIS_NUM):
            # generate_samples(sess, G, BATCH_SIZE, generated_num, negative_file)
            generate_samples(sess, G, BATCH_SIZE, generated_num, negative_file, input_data_loader)
            # dis_data_loader.load_train_data(positive_file, negative_file)
            dis_data_loader.load_train_data(y_file, negative_file)
            for _ in range(3):
                dis_data_loader.reset_pointer()
                for it in range(dis_data_loader.num_batch):
                    x_batch, y_batch = dis_data_loader.next_batch()
                    feed = {
                        D.input_x: x_batch,
                        D.input_y: y_batch,
                        D.dropout_keep_prob: dis_dropout_keep_prob
                    }
                    _, acc = sess.run([D.train_op, D.accuracy], feed)
            print("Epoch ", epoch, " Accuracy: ", acc)
            log.write("Epoch:\t" + str(epoch) + "\tAccuracy:\t" + str(acc) + "\n")
            # if epoch % 20  == 0 or epoch == PRE_DIS_NUM -1:
            #     print("saving at epoch: ", epoch)
            #     saver_dis.save(sess, "./model/per_dis/pretrain_dis", global_step=epoch)
            if acc > best:
                saver_dis.save(sess, "./model/pre_dis/pretrain_dis_best")
                best = acc
        print("pre-train discriminator: ", time.time() - s, " s")

    g_beta = G_beta(G, update_rate=0.8)
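    # G_beta appears to play the role of SeqGAN's rollout network: a delayed
    # copy of G (update_rate=0.8) used to estimate rewards for sampled tokens.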

    print('#########################################################################')
    print('Start Adversarial Training...')
    log.write('Start adversarial training...\n')

    for total_batch in range(TOTAL_BATCH):
        s = time.time()
        for it in range(ADV_GEN_TIME):
            for i in range(input_data_loader.num_batch):
                target, input_x = input_data_loader.next_batch()
                samples = G.generate(sess, target)
                rewards = g_beta.get_reward(sess, target, input_x, sample_time, D)
                avg = np.mean(np.sum(rewards, axis=1), axis=0) / SEQ_LENGTH
                print(" epoch : %d time : %di: %d avg %f" % (total_batch, it, i, avg))
                feed = {G.x: samples, G.rewards: rewards, G.inputs: target}
                _ = sess.run(G.g_update, feed_dict=feed)
        # Test
        if total_batch % 5 == 0 or total_batch == TOTAL_BATCH - 1:
            avg = np.mean(np.sum(rewards, axis=1), axis=0) / SEQ_LENGTH
            buffer = 'epoch:\t' + str(total_batch) + '\treward:\t' + str(avg) + '\n'
            print('total_batch: ', total_batch, 'average reward: ', avg)
            log.write(buffer)

            saver_seqgan.save(sess, "./model/seq_gan/seq_gan", global_step=total_batch)

        g_beta.update_params()

        # train the discriminator
        for it in range(ADV_GEN_TIME // GEN_VS_DIS_TIME):
            # generate_samples(sess, G, BATCH_SIZE, generated_num, negative_file)
            generate_samples(sess, G, BATCH_SIZE, generated_num, negative_file, input_data_loader)
            dis_data_loader.load_train_data(y_file, negative_file)

            for _ in range(3):
                dis_data_loader.reset_pointer()
                for batch in range(dis_data_loader.num_batch):
                    x_batch, y_batch = dis_data_loader.next_batch()
                    feed = {
                        D.input_x: x_batch,
                        D.input_y: y_batch,
                        D.dropout_keep_prob: dis_dropout_keep_prob
                    }
                    _ = sess.run(D.train_op, feed_dict=feed)
        print("Adversarial Epoch consumed: ", time.time() - s, " s")

    # final generation
    print("Finished")
    log.close()
    # save model

    print("Training Finished, starting to generating test ")
    test_loader = Input_Data_loader(batch_size=BATCH_SIZE)
    test_loader.create_batches(test_x, test_y)

    generate_samples(sess, G, BATCH_SIZE, test_num, test_file + "_final.txt", test_loader)
Example #54
                                                                G))))))))
    elephant = q(
        G, q(q(W, W, q(G, G, B, G), W), W, G, W),
        q(W, G, q(G, G, G, q(W, W, G, W)), q(G, G, W, G)),
        q(
            q(q(W, q(W, W, G, q(G, W, q(q(W, W, W, G), G, W, W), W)), W, G), G,
              W, W), W, q(W, W, q(W, W, G, W), W), W))

    simpler_elephant = q(
        G, q(W, W, G, W), q(W, G, q(G, G, G, W), q(G, G, W, G)),
        q(
            q(q(W, q(W, W, G, q(G, W, q(q(W, W, W, G), G, W, W), W)), W, G), G,
              W, W), W, q(W, W, W, W), W))

    return [code_001, code_002, elephant, simpler_elephant]


if __name__ == "__main__":
    goal, gamma = make_family_1()

    gen = Generator(gamma)
    for tree_size in [3, 5]:
        num_trees = gen.get_num(tree_size, goal)
        print('tree_size =', tree_size, "-> num_trees =", num_trees)

        if num_trees:
            # generate a few random trees
            for i_sample in range(5):
                tree = gen.gen_one(tree_size, goal)
                print(tree_size, tree)
Example #55
    def __init__(self, args):
        self._log_step = args.log_step
        self._batch_size = args.batch_size
        self._image_size = args.image_size
        self._latent_dim = args.latent_dim
        self._coeff_gan = args.coeff_gan
        self._coeff_vae = args.coeff_vae
        self._coeff_reconstruct = args.coeff_reconstruct
        self._coeff_latent = args.coeff_latent
        self._coeff_kl = args.coeff_kl
        self._norm = 'instance' if args.instance_normalization else 'batch'
        self._use_resnet = args.use_resnet

        self._augment_size = self._image_size + (30 if self._image_size == 256
                                                 else 15)
        self._image_shape = [self._image_size, self._image_size, 3]

        self.is_train = tf.placeholder(tf.bool, name='is_train')
        self.lr = tf.placeholder(tf.float32, name='lr')
        self.global_step = tf.train.get_or_create_global_step(graph=None)

        image_a = self.image_a = \
            tf.placeholder(tf.float32, [self._batch_size] + self._image_shape, name='image_a')
        image_b = self.image_b = \
            tf.placeholder(tf.float32, [self._batch_size] + self._image_shape, name='image_b')
        z = self.z = \
            tf.placeholder(tf.float32, [self._batch_size, self._latent_dim], name='z')

        # Data augmentation
        seed = random.randint(0, 2**31 - 1)
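
        # A single shared seed makes the random crop (and flip) identical for
        # image_a and image_b, so the paired images stay aligned.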

        def augment_image(image):
            image = tf.image.resize_images(
                image, [self._augment_size, self._augment_size])
            image = tf.random_crop(image,
                                   [self._batch_size] + self._image_shape,
                                   seed=seed)
            image = tf.map_fn(
                lambda x: tf.image.random_flip_left_right(x, seed), image)
            return image

        image_a = tf.cond(self.is_train, lambda: augment_image(image_a),
                          lambda: image_a)
        image_b = tf.cond(self.is_train, lambda: augment_image(image_b),
                          lambda: image_b)

        # Generator
        G = Generator('G',
                      is_train=self.is_train,
                      norm=self._norm,
                      image_size=self._image_size)

        # Discriminator
        D = Discriminator('D',
                          is_train=self.is_train,
                          norm=self._norm,
                          activation='leaky',
                          image_size=self._image_size)

        # Encoder
        E = Encoder('E',
                    is_train=self.is_train,
                    norm=self._norm,
                    activation='relu',
                    image_size=self._image_size,
                    latent_dim=self._latent_dim,
                    use_resnet=self._use_resnet)

        # conditional VAE-GAN: B -> z -> B'
        z_encoded, z_encoded_mu, z_encoded_log_sigma = E(image_b)
        image_ab_encoded = G(image_a, z_encoded)

        # conditional Latent Regressor-GAN: z -> B' -> z'
        image_ab = self.image_ab = G(image_a, z)
        z_recon, z_recon_mu, z_recon_log_sigma = E(image_ab)

        # Discriminate real/fake images
        D_real = D(image_b)
        D_fake = D(image_ab)
        D_fake_encoded = D(image_ab_encoded)

        loss_vae_gan = (tf.reduce_mean(tf.squared_difference(D_real, 0.9)) +
                        tf.reduce_mean(tf.square(D_fake_encoded)))

        loss_image_cycle = tf.reduce_mean(tf.abs(image_b - image_ab_encoded))

        loss_gan = (tf.reduce_mean(tf.squared_difference(D_real, 0.9)) +
                    tf.reduce_mean(tf.square(D_fake)))
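
        # Real targets use 0.9 rather than 1.0, i.e. one-sided label smoothing
        # on this least-squares GAN objective.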

        loss_latent_cycle = tf.reduce_mean(tf.abs(z - z_recon))

        loss_kl = -0.5 * tf.reduce_mean(1 + 2 * z_encoded_log_sigma -
                                        z_encoded_mu**2 -
                                        tf.exp(2 * z_encoded_log_sigma))
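
        # Closed-form KL divergence between N(mu, sigma) and the standard
        # normal prior, expressed in terms of log sigma.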

        loss = self._coeff_vae * loss_vae_gan - self._coeff_reconstruct * loss_image_cycle + \
            self._coeff_gan * loss_gan - self._coeff_latent * loss_latent_cycle - \
            self._coeff_kl * loss_kl
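
        # A single scalar objective drives the min-max game: the discriminator
        # descends on loss while G and E descend on -loss (see the optimizers
        # below).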

        # Optimizer
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.optimizer_D = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=0.5) \
                                .minimize(loss, var_list=D.var_list, global_step=self.global_step)
            self.optimizer_G = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=0.5) \
                                .minimize(-loss, var_list=G.var_list)
            self.optimizer_E = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=0.5) \
                                .minimize(-loss, var_list=E.var_list)

        # Summaries
        self.loss_vae_gan = loss_vae_gan
        self.loss_image_cycle = loss_image_cycle
        self.loss_latent_cycle = loss_latent_cycle
        self.loss_gan = loss_gan
        self.loss_kl = loss_kl
        self.loss = loss

        tf.summary.scalar('loss/vae_gan', loss_vae_gan)
        tf.summary.scalar('loss/image_cycle', loss_image_cycle)
        tf.summary.scalar('loss/latent_cycle', loss_latent_cycle)
        tf.summary.scalar('loss/gan', loss_gan)
        tf.summary.scalar('loss/kl', loss_kl)
        tf.summary.scalar('loss/total', loss)
        tf.summary.scalar('model/D_real', tf.reduce_mean(D_real))
        tf.summary.scalar('model/D_fake', tf.reduce_mean(D_fake))
        tf.summary.scalar('model/D_fake_encoded',
                          tf.reduce_mean(D_fake_encoded))
        tf.summary.scalar('model/lr', self.lr)
        tf.summary.image('image/A', image_a[0:1])
        tf.summary.image('image/B', image_b[0:1])
        tf.summary.image('image/A-B', image_ab[0:1])
        tf.summary.image('image/A-B_encoded', image_ab_encoded[0:1])
        self.summary_op = tf.summary.merge_all()
Example #56
def main():
    random.seed(SEED)
    np.random.seed(SEED)
    assert START_TOKEN == 0

    word_dict = Word2index()
    word_dict.load_dict(vocab_file)

    gen_data_loader = Gen_Data_loader(word_dict, BATCH_SIZE)
    likelihood_data_loader = Gen_Data_loader(word_dict,
                                             BATCH_SIZE)  # For testing
    vocab_size = len(word_dict)  # changed 5000 to 10581
    dis_data_loader = Dis_dataloader(word_dict, BATCH_SIZE)

    generator = Generator(vocab_size, BATCH_SIZE, EMB_DIM, HIDDEN_DIM,
                          SEQ_LENGTH, START_TOKEN)
    target_params = cPickle.load(open('save/target_params_py3.pkl', 'rb'))

    target_lstm = TARGET_LSTM(vocab_size, BATCH_SIZE, EMB_DIM, HIDDEN_DIM,
                              SEQ_LENGTH, START_TOKEN,
                              target_params)  # The oracle model

    discriminator = Discriminator(sequence_length=20,
                                  num_classes=2,
                                  vocab_size=vocab_size,
                                  embedding_size=dis_embedding_dim,
                                  filter_sizes=dis_filter_sizes,
                                  num_filters=dis_num_filters,
                                  l2_reg_lambda=dis_l2_reg_lambda)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())

    gen_data_loader.create_batches(positive_file, gen_flag=1)

    log = open('save/experiment-log.txt', 'w')
    #  pre-train generator
    print('Start pre-training...')
    log.write('pre-training...\n')
    for epoch in range(PRE_EPOCH_NUM):
        loss = pre_train_epoch(sess, generator, gen_data_loader)
        if epoch % 5 == 0:
            generate_samples(sess, generator, BATCH_SIZE, generated_num,
                             eval_file)
            likelihood_data_loader.create_batches(eval_file)
            test_loss = target_loss(sess, target_lstm, likelihood_data_loader)
            print('pre-train epoch ', epoch, 'test_loss ', test_loss)
            buffer = 'epoch:\t' + str(epoch) + '\tnll:\t' + str(
                test_loss) + '\n'
            log.write(buffer)

    print('Start pre-training discriminator...')
    # Train 3 epoch on the generated data and do this for 50 times
    for epoch in range(50):
        generate_samples(sess, generator, BATCH_SIZE, generated_num,
                         negative_file)
        dis_data_loader.load_train_data(positive_file, negative_file)
        print("Epoch : ", epoch)
        for _ in range(3):
            dis_data_loader.reset_pointer()
            for it in range(dis_data_loader.num_batch):
                x_batch, y_batch = dis_data_loader.next_batch()
                feed = {
                    discriminator.input_x: x_batch,
                    discriminator.input_y: y_batch,
                    discriminator.dropout_keep_prob: dis_dropout_keep_prob
                }
                _ = sess.run(discriminator.train_op, feed)

    rollout = ROLLOUT(generator, 0.8)
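
    # The rollout network is a slowly updated copy of the generator (update
    # rate 0.8) used for Monte Carlo reward estimation, as in the SeqGAN paper.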

    saver = tf.train.Saver()

    print(
        '#########################################################################'
    )
    print('Start Adversarial Training...')
    log.write('adversarial training...\n')
    for total_batch in range(TOTAL_BATCH):
        # Train the generator for one step
        for it in range(1):
            samples = generator.generate(sess)
            rewards = rollout.get_reward(sess, samples, 16, discriminator)
            feed = {generator.x: samples, generator.rewards: rewards}
            _ = sess.run(generator.g_updates, feed_dict=feed)

        # Test
        if total_batch % 5 == 0 or total_batch == TOTAL_BATCH - 1:
            generate_samples(sess, generator, BATCH_SIZE, generated_num,
                             eval_file)
            likelihood_data_loader.create_batches(eval_file, gen_flag=0)
            test_loss = target_loss(sess, target_lstm, likelihood_data_loader)
            buffer = 'epoch:\t' + str(total_batch) + '\tnll:\t' + str(
                test_loss) + '\n'
            print('total_batch: ', total_batch, 'test_loss: ', test_loss)
            log.write(buffer)

        # Update roll-out parameters
        rollout.update_params()

        # Train the discriminator
        for _ in range(5):
            generate_samples(sess, generator, BATCH_SIZE, generated_num,
                             negative_file)
            dis_data_loader.load_train_data(positive_file, negative_file)

            for _ in range(3):
                dis_data_loader.reset_pointer()
                for it in range(dis_data_loader.num_batch):
                    x_batch, y_batch = dis_data_loader.next_batch()
                    feed = {
                        discriminator.input_x: x_batch,
                        discriminator.input_y: y_batch,
                        discriminator.dropout_keep_prob: dis_dropout_keep_prob
                    }
                    _ = sess.run(discriminator.train_op, feed)

        if total_batch % 5 == 0:
            save_path = saver.save(sess,
                                   "save/checkpoints/model.ckpt",
                                   global_step=5)

    log.close()
Example #57
 def __init__(self, model, input_filepath):
     Generator.__init__(self, model, input_filepath)
Example #58
                              label_file_path=label_file_path,
                              shuffle=True,
                              seq_length=seq_length,
                              batch_size=batch_size,
                              training=True)

# Set up paths
current_time = datetime.now().strftime('%Y%m%d-%H%M%S')
pretrain_checkpoint_dir = fr'training_checkpoints\{cell_line}\{a}'

# Load the model
gen = Generator(dataloader=dataloader,
                vocab=vocab,
                batch_size=batch_size,
                embedding_dim=embedding_dim,
                seq_length=seq_length,
                checkpoint_dir=pretrain_checkpoint_dir,
                rnn_units=gen_rnn_units,
                start_token=0,
                learning_rate=learning_rate)

# Load the weights
gen.load_weights()

# Generate data
# fake_samples = gen.generate(gen_seq_len)
# genned_sentences = vocab.extract_seqs(fake_samples)
# print("Generation finished, writing to file...")
#
# # Save locally
# save_fake_samples_filepath = './saved_fake_samples/{}.txt'.format(current_time)
 def domains_to_generate(self):
     return filter(ObjCGenerator.should_generate_domain_event_dispatcher_filter(self.model()), Generator.domains_to_generate(self))
    def generate_domain(self, domain):
        lines = []
        args = {'domain': domain.domain_name}

        lines.append('// %(domain)s.' % args)

        version = self.version_for_domain(domain)
        type_declarations = self.type_declarations_for_domain(domain)
        commands = self.commands_for_domain(domain)
        events = self.events_for_domain(domain)

        has_async_commands = any([command.is_async for command in commands])
        if len(events) > 0 or has_async_commands:
            lines.append(
                'InspectorBackend.register%(domain)sDispatcher = InspectorBackend.registerDomainDispatcher.bind(InspectorBackend, "%(domain)s");'
                % args)

        if isinstance(version, int):
            version_args = {'domain': domain.domain_name, 'version': version}
            lines.append(
                'InspectorBackend.registerVersion("%(domain)s", %(version)s);'
                % version_args)

        for declaration in type_declarations:
            if declaration.type.is_enum():
                enum_args = {
                    'domain':
                    domain.domain_name,
                    'enumName':
                    declaration.type_name,
                    'enumMap':
                    ", ".join([
                        '%s: "%s"' %
                        (Generator.stylized_name_for_enum_value(enum_value),
                         enum_value)
                        for enum_value in declaration.type.enum_values()
                    ])
                }
                lines.append(
                    'InspectorBackend.registerEnum("%(domain)s.%(enumName)s", {%(enumMap)s});'
                    % enum_args)

            def is_anonymous_enum_member(type_member):
                return isinstance(type_member.type,
                                  EnumType) and type_member.type.is_anonymous

            for _member in filter(is_anonymous_enum_member,
                                  declaration.type_members):
                enum_args = {
                    'domain':
                    domain.domain_name,
                    'enumName':
                    '%s%s' %
                    (declaration.type_name, ucfirst(_member.member_name)),
                    'enumMap':
                    ", ".join([
                        '%s: "%s"' %
                        (Generator.stylized_name_for_enum_value(enum_value),
                         enum_value)
                        for enum_value in _member.type.enum_values()
                    ])
                }
                lines.append(
                    'InspectorBackend.registerEnum("%(domain)s.%(enumName)s", {%(enumMap)s});'
                    % enum_args)

        def is_anonymous_enum_param(param):
            return isinstance(param.type, EnumType) and param.type.is_anonymous

        for event in events:
            for param in filter(is_anonymous_enum_param,
                                event.event_parameters):
                enum_args = {
                    'domain':
                    domain.domain_name,
                    'enumName':
                    '%s%s' %
                    (ucfirst(event.event_name), ucfirst(param.parameter_name)),
                    'enumMap':
                    ", ".join([
                        '%s: "%s"' %
                        (Generator.stylized_name_for_enum_value(enum_value),
                         enum_value)
                        for enum_value in param.type.enum_values()
                    ])
                }
                lines.append(
                    'InspectorBackend.registerEnum("%(domain)s.%(enumName)s", {%(enumMap)s});'
                    % enum_args)

            event_args = {
                'domain':
                domain.domain_name,
                'eventName':
                event.event_name,
                'params':
                ", ".join([
                    '"%s"' % parameter.parameter_name
                    for parameter in event.event_parameters
                ])
            }
            lines.append(
                'InspectorBackend.registerEvent("%(domain)s.%(eventName)s", [%(params)s]);'
                % event_args)

        for command in commands:

            def generate_parameter_object(parameter):
                optional_string = "true" if parameter.is_optional else "false"
                pairs = []
                pairs.append('"name": "%s"' % parameter.parameter_name)
                pairs.append(
                    '"type": "%s"' %
                    Generator.js_name_for_parameter_type(parameter.type))
                pairs.append('"optional": %s' % optional_string)
                return "{%s}" % ", ".join(pairs)

            command_args = {
                'domain':
                domain.domain_name,
                'commandName':
                command.command_name,
                'callParams':
                ", ".join([
                    generate_parameter_object(parameter)
                    for parameter in command.call_parameters
                ]),
                'returnParams':
                ", ".join([
                    '"%s"' % parameter.parameter_name
                    for parameter in command.return_parameters
                ]),
            }
            lines.append(
                'InspectorBackend.registerCommand("%(domain)s.%(commandName)s", [%(callParams)s], [%(returnParams)s]);'
                % command_args)

        activate_args = {
            'domain':
            domain.domain_name,
            'availability':
            json.dumps(domain.availability) if domain.availability else '',
        }
        if domain.availability:
            lines.append(
                'InspectorBackend.activateDomain("%(domain)s", %(availability)s);'
                % activate_args)
        else:
            lines.append('InspectorBackend.activateDomain("%(domain)s");' %
                         activate_args)

        return "\n".join(lines)