Example #1
class Person(get_base_model(PERSON_BASE_MODEL)):
    def first_image(self):
        try:
            # First image for this person by sort order; IndexError means none exist.
            return (PersonImage.objects.filter(person=self)
                    .order_by('sort')[0].image)
        except IndexError:
            return None
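All of the Django examples in this list subclass whatever get_base_model(...) returns, which points to a settings-driven "swappable base model" pattern. The helper itself is not shown in any snippet, so the following is only a sketch of what it might look like, assuming the *_BASE_MODEL constants are dotted Python paths to abstract model classes (import_string and the fallback Base class are assumptions, not the project's actual code):

from django.db import models
from django.utils.module_loading import import_string


def get_base_model(dotted_path):
    # Hypothetical reconstruction: resolve a configurable dotted path such as
    # "teams.base.AbstractSquad" to an abstract model class, or fall back to a
    # bare abstract model when nothing is configured.
    if dotted_path:
        return import_string(dotted_path)

    class Base(models.Model):
        class Meta:
            abstract = True

    return Base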
Example #2
class Squad(get_base_model(SQUAD_BASE_MODEL)):
    class Meta:
        app_label = 'teams'

    def transfer_in(self):
        return Transfer.objects.filter(new=self)

    def transfer_out(self):
        return Transfer.objects.filter(old=self)

    def splayers(self):
        return Player.objects.filter(squad=self).order_by('number')

    def scontacts(self):
        return Contact.objects.filter(squad=self).order_by('sortorder')

    def sstaff(self):
        return Staff.objects.filter(squad=self).order_by('sortorder')

    def first_image(self):
        try:
            return (SquadImage.objects.filter(squad=self)
                    .order_by('sort')[0].image)
        except IndexError:
            return None
Example #3
class Staff(get_base_model(SQUAD_PERSON_BASE_MODEL)):
    function = models.CharField(max_length=50)

    class Meta:
        ordering = ['sortorder', 'person']

    def __unicode__(self):
        return u'%s %s %s' % (self.squad, self.person, self.function)
Example #4
class Team(get_base_model(TEAM_BASE_MODEL)):
    def first_image(self):
        try:
            return (TeamImage.objects.filter(team=self)
                    .order_by('sort')[0].image)
        except IndexError:
            return None

    def seasons(self):
        return Season.objects.filter(squad__team=self).order_by('slug')
Example #5
class Player(get_base_model(SQUAD_PERSON_BASE_MODEL)):
    number = models.SmallIntegerField()
    positions = models.ManyToManyField('Position')

    class Meta:
        ordering = ['number', 'sortorder', 'person']

    def position_part_list(self):
        # Collect the unique words of all position names, preserving order.
        seen = set()
        seen_add = seen.add
        result = []
        for p in self.positions.all():
            for x in p.name.split(' '):
                # set.add() returns None, so `not seen_add(x)` is always true;
                # calling it here records x in `seen` as a side effect.
                if x not in seen and not seen_add(x):
                    result.append(x)
        return result

    def __unicode__(self):
        return u'%s %s %d' % (self.squad, self.person, self.number)
Example #6
IMG_SIZE = utils.img_size()
TRAIN_DIR = utils.train_dir(TYPE)
VALIDATION_DIR = utils.validation_dir(TYPE)

loss = utils.get_loss(TYPE)
optimizer = tf.keras.optimizers.RMSprop(learning_rate=learning_rate)
fine_tune_optimizer = tf.keras.optimizers.RMSprop(learning_rate=fine_tune_learning_rate)

total_epochs = epochs + fine_tune_epochs

# START

train_generator, validation_generator = utils.get_generators(TYPE, batch_size)

base_model = utils.get_base_model(MODELNAME)

base_model.trainable = False

if print_model_summary:
    base_model.summary()

model = utils.get_model(TYPE, base_model, dropout)

model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])

if print_model_summary:
    model.summary()

history = model.fit(train_generator,
                    epochs=epochs,
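In this Keras training script, utils.get_base_model(MODELNAME) plays a different role: it returns a pretrained backbone that is frozen (base_model.trainable = False) before utils.get_model stacks a new head on top. The utils module is not shown, so the sketch below is an assumption; it simply maps a name to a tf.keras.applications backbone without its classifier:

import tensorflow as tf


def get_base_model(name, img_size=(150, 150)):
    # Hypothetical helper: return an ImageNet-pretrained backbone with its
    # top classification layer removed, ready to be frozen and reused.
    backbones = {
        'mobilenet_v2': tf.keras.applications.MobileNetV2,
        'inception_v3': tf.keras.applications.InceptionV3,
    }
    return backbones[name](input_shape=img_size + (3,),
                           include_top=False,
                           weights='imagenet')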
Example #7
    def __init__(self, args):
        super().__init__(args)

        self.args = args
        self.d_model = OUT_BLOCK4_DIMENSION_DICT[self.args.network_base]

        full_resnet = get_base_model(self.args.network_base)

        self.init_layers = nn.Sequential(
            full_resnet.conv1,
            full_resnet.bn1,
            full_resnet.relu,
            full_resnet.maxpool,
        )

        self.block1 = full_resnet.layer1
        self.block2 = full_resnet.layer2
        self.block3 = full_resnet.layer3
        self.block4 = full_resnet.layer4

        self.image_network = nn.Sequential(
            full_resnet.conv1,
            full_resnet.bn1,
            full_resnet.relu,
            full_resnet.maxpool,
            self.block1,
            self.block2,
            self.block3,
            self.block4,
        )

        self.obj_detection_head = self.args.obj_det_head

        self.gen_roadmap = self.args.gen_road_map
        self.gen_semantic_map = self.args.gen_semantic_map
        self.gen_object_map = self.args.gen_object_map
        self.detect_objects = self.args.detect_objects

        self.fusion = self.args.view_fusion_strategy

        self.obj_detection_head = "retinanet"

        self.max_f = min(self.d_model, 1024)

        # print(self.args.finetune_obj)
        self.model_type = self.args.finetune_obj.split("_")[0]

        self.input_dim = 256

        self.blobs_strategy = self.args.blobs_strategy

        # print(self.fusion)
        self.dense_fuse = "dense" in self.fusion
        self.conv_fuse = "conv" in self.fusion
        self.dense_before_fuse = "dbf" in self.fusion
        self.dense_after_fuse = "daf" in self.fusion

        self.frefine_layers = 0
        self.brefine_layers = 0

        if self.conv_fuse or self.dense_fuse:
            self.frefine_layers = 1
            self.brefine_layers = 1

        self.drefine_layers = 0
        self.dcrefine_layers = 0
        if self.dense_before_fuse:
            self.drefine_layers = 1
        if self.dense_after_fuse:
            self.dcrefine_layers = 1

        if self.gen_roadmap or self.gen_semantic_map or self.gen_object_map or (
                self.detect_objects and "decoder" in self.blobs_strategy):
            # print("dfuse",self.dense_fuse, "cfuse", self.conv_fuse, "dproj", self.dense_project)
            self.fuse = Fusion(args,
                               self.d_model,
                               frefine_layers=self.frefine_layers,
                               brefine_layers=self.brefine_layers,
                               drefine_layers=self.drefine_layers,
                               dense_fusion=self.dense_fuse,
                               conv_fusion=self.conv_fuse,
                               dcrefine_layers=self.dcrefine_layers)
            # print(self.fuse)

        out_dim = 1

        if self.gen_roadmap or self.gen_semantic_map or self.gen_object_map:

            if self.model_type == "det":

                if self.dense_fuse:

                    init_layer_dim = 32
                    init_channel_dim = 64

                    self.decoder_network = DecoderNetwork(
                        self.args,
                        init_layer_dim,
                        init_channel_dim,
                        self.max_f,
                        self.d_model,
                        add_initial_upsample_conv=True)

                    self.decoding = nn.Sequential(self.decoder_network)

                else:

                    if self.drefine_layers > 0 or self.dcrefine_layers > 0:
                        # print("changed add_convs_before_decoding")

                        if self.dcrefine_layers > 0:
                            init_layer_dim = 32
                            init_channel_dim = 64
                            self.decoder_network = DecoderNetwork(
                                self.args,
                                init_layer_dim,
                                init_channel_dim,
                                self.max_f,
                                self.d_model,
                                add_initial_upsample_conv=True)

                        else:
                            init_layer_dim = 32
                            init_channel_dim = 128
                            self.decoder_network = DecoderNetwork(
                                self.args,
                                init_layer_dim,
                                init_channel_dim,
                                self.max_f,
                                self.d_model,
                                add_initial_upsample_conv=True)
                        # print("add_convs_before_decoding", self.add_convs_before_decoding)

                    else:
                        init_layer_dim = 8
                        init_channel_dim = 128
                        self.decoder_network = DecoderNetwork(
                            self.args,
                            init_layer_dim,
                            init_channel_dim,
                            self.max_f,
                            self.d_model,
                            add_convs_before_decoding=True)

                    self.decoding = nn.Sequential(self.decoder_network)

            else:
                self.max_f = 64
                self.latent_dim = self.args.latent_dim
                init_layer_dim = 32
                init_channel_dim = self.max_f

                self.decoder_network = DecoderNetwork(
                    self.args,
                    init_layer_dim,
                    init_channel_dim,
                    self.max_f,
                    self.d_model,
                    add_initial_upsample_conv=True)

                if self.conv_fuse:
                    self.avg_pool_refine = dblock(self.latent_dim,
                                                  self.latent_dim)

                self.z_project = nn.Linear(self.d_model, 2 * self.latent_dim)
                self.z_refine = dblock(self.latent_dim, self.latent_dim)
                self.z_reshape = dblock(self.latent_dim, 16 * 16 * 32)
                self.decoding = nn.Sequential(self.decoder_network,
                                              self.z_refine, self.refine,
                                              self.z_reshape, self.z_project)

            # print(self.decoder_network)
            self.loss_type = self.args.road_map_loss

            if self.loss_type == "mse":
                self.criterion = torch.nn.MSELoss()
            elif self.loss_type == "bce":
                if self.gen_roadmap:
                    self.criterion = torch.nn.BCEWithLogitsLoss()
                else:
                    self.criterion = nn.CrossEntropyLoss()

        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

        if "retinanet" in self.obj_detection_head and self.detect_objects:
            if "decoder" in self.blobs_strategy:
                self.obj_detection_model = ObjectDetectionHeads(
                    args, self.image_network, self.decoder_network)
            else:
                self.obj_detection_model = ObjectDetectionHeads(
                    args, self.image_network)

        self.dropout = torch.nn.Dropout(p=0.5, inplace=False)

        self.sigmoid = nn.Sigmoid()

        self.shared_params = list(self.image_network.parameters())
        if not hasattr(self, 'finetune_params'):
            # Make sure the list exists before the += updates below; the base
            # class is not shown here and may or may not initialize it.
            self.finetune_params = []

        if self.gen_roadmap or self.gen_semantic_map or self.gen_object_map or (
                self.detect_objects and "decoder" in self.blobs_strategy):
            if args.imagessl_load_ckpt:
                self.finetune_params += list(self.fuse.parameters())
            else:
                self.shared_params += list(self.fuse.parameters())

        if self.gen_roadmap or self.gen_semantic_map or self.gen_object_map:
            self.finetune_params += list(self.decoding.parameters())

        if self.detect_objects:
            self.finetune_params += list(
                self.obj_detection_model.params.parameters())
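In this PyTorch example, get_base_model(self.args.network_base) evidently returns a torchvision-style ResNet: the code reuses its conv1, bn1, relu, maxpool and layer1 through layer4 attributes, and OUT_BLOCK4_DIMENSION_DICT maps the same names to the width of the final block. A minimal sketch consistent with that usage (the real helper and dictionary are not shown, so the names below are assumptions):

import torchvision.models as models

# Assumed mapping from backbone name to the channel width of layer4's output.
OUT_BLOCK4_DIMENSION_DICT = {
    'resnet18': 512,
    'resnet34': 512,
    'resnet50': 2048,
    'resnet101': 2048,
}


def get_base_model(network_base, pretrained=False):
    # Hypothetical reconstruction: look up a torchvision ResNet constructor by
    # name; callers slice out its stem and residual blocks as shown above.
    constructors = {
        'resnet18': models.resnet18,
        'resnet34': models.resnet34,
        'resnet50': models.resnet50,
        'resnet101': models.resnet101,
    }
    return constructors[network_base](pretrained=pretrained)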
Example #8
class Contact(get_base_model(SQUAD_PERSON_BASE_MODEL)):
    address = models.CharField(max_length=100, null=True, blank=True)
    phone = models.CharField(max_length=15, null=True, blank=True)
Example #9
class Date(get_base_model(DATE_BASE_MODEL)):
    pass
Example #10
class PersonAttribute(get_base_model(PERSON_ATTR_BASE_MODEL)):
    pass