Example #1
    def make_train(batch_id, config, phase, **inputs):
        for i in inputs:
            inputs[i] = make_input(inputs[i])

        net = config['inference']['net']
        config['batch_id'] = batch_id

        if phase != 'inference':
            result = net(inputs['imgs'],
                         **{i: inputs[i]
                            for i in inputs if i != 'imgs'})

            num_loss = len(config['train']['loss'])

            ## the last num_loss outputs of the network are the individual loss terms;
            ## their weights come from config['train']['loss']
            losses = {
                i[0]: result[-num_loss + idx] * i[1]
                for idx, i in enumerate(config['train']['loss'])
            }

            loss = 0
            toprint = '\n{}: '.format(batch_id)
            for i in losses:
                loss = loss + torch.mean(losses[i])

                my_loss = make_output(losses[i])
                my_loss = my_loss.mean(axis=0)

                if my_loss.size == 1:
                    toprint += ' {}: {}'.format(i,
                                                format(my_loss.mean(), '.8f'))
                else:
                    toprint += '\n{}'.format(i)
                    for j in my_loss:
                        toprint += ' {}'.format(format(j.mean(), '.8f'))

            logger.write(toprint)
            logger.flush()

            optimizer = train_cfg['optimizer']  # train_cfg is assumed to be set in the enclosing scope

            if batch_id == 200000:
                ## decrease the learning rate after 200000 iterations
                for param_group in optimizer.param_groups:
                    param_group['lr'] = 1e-5

            if phase == 'train':
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            return None
        else:
            out = {}
            net = net.eval()
            result = net(**inputs)
            if not isinstance(result, (list, tuple)):
                result = [result]
            out['preds'] = [make_output(i) for i in result]
            return out
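make_input and make_output are project helpers that none of these examples define. A minimal sketch of what they plausibly do, assuming the usual numpy-to-CUDA round trip (the names are kept from the snippets, the behavior is inferred):

    import torch

    def make_input(t, requires_grad=False, need_cuda=True):
        # wrap a numpy array (or tensor) as a float tensor, optionally on the GPU
        inp = torch.as_tensor(t).float()
        inp.requires_grad_(requires_grad)
        return inp.cuda() if need_cuda else inp

    def make_output(x):
        # detach from the graph and move back to a numpy array on the CPU
        if isinstance(x, (list, tuple)):
            return [make_output(i) for i in x]
        return x.detach().cpu().numpy()

The loss spec iterated in the dict comprehension is then a list of (name, weight) pairs, e.g. config['train']['loss'] = [['push_loss', 1e-3], ['pull_loss', 1e-3], ['detection_loss', 1]] (illustrative values, not taken from the source).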
Example #2
    def make_train(batch_id, config, phase, **inputs):
        for i in inputs:
            inputs[i] = make_input(inputs[i])

        net = config['inference']['net']
        config['batch_id'] = batch_id

        if phase != 'inference':
            result = net(inputs['imgs'], **{i:inputs[i] for i in inputs if i!='imgs'})

            num_loss = len(config['train']['loss'])

            ## the last num_loss outputs of the network are the individual loss terms;
            ## their weights come from config['train']['loss']
            losses = {i[0]: result[-num_loss + idx] * i[1] for idx, i in enumerate(config['train']['loss'])}

            loss = 0
            toprint = '\n{}: '.format(batch_id)
            for i in losses:
                loss = loss + torch.mean(losses[i])

                my_loss = make_output(losses[i])
                my_loss = my_loss.mean(axis=0)

                if my_loss.size == 1:
                    toprint += ' {}: {}'.format(i, format(my_loss.mean(), '.8f'))
                else:
                    toprint += '\n{}'.format(i)
                    for j in my_loss:
                        toprint += ' {}'.format(format(j.mean(), '.8f'))

            logger.write(toprint)
            logger.flush()

            optimizer = train_cfg['optimizer']  # train_cfg is assumed to be set in the enclosing scope

            if batch_id == 200000:
                ## decrease the learning rate after 200000 iterations
                for param_group in optimizer.param_groups:
                    param_group['lr'] = 1e-5

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            return None
        else:
            out = {}
            net = net.eval()
            result = net(**inputs)
            if not isinstance(result, (list, tuple)):
                result = [result]
            out['preds'] = [make_output(i) for i in result]
            return out
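For context, a function like this is called once per batch from a driver loop. A minimal sketch, assuming a data_iter that yields keyword-argument dicts keyed to match the network's inputs (both names are hypothetical):

    def train_loop(config, data_iter, num_batches, phase='train'):
        # make_train returns None while training; it only returns
        # predictions in the 'inference' phase
        for batch_id in range(num_batches):
            batch = next(data_iter)  # e.g. {'imgs': ..., 'heatmaps': ...}
            make_train(batch_id, config, phase, **batch)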
Example #3
import torch
from torch.nn import DataParallel


def train_func(opts, model, optimizer, phase, **inputs):

    for i in inputs:
        inputs[i] = make_input(inputs[i])

    if phase == 'train':
        net = model.train()
    else:
        net = model.eval()

    forward_net = DataParallel(net.cuda())

    if phase != 'inference':
        # validation runs the same computation, just without building a graph
        with torch.set_grad_enabled(phase == 'train'):
            output = forward_net(inputs['imgs'])
            losses = model.calc_loss(
                output, **{i: inputs[i]
                           for i in inputs if i != 'imgs'})
        losses = {
            'push_loss': losses[0] * opts.push_loss,
            'pull_loss': losses[1] * opts.pull_loss,
            'detection_loss': losses[2] * opts.detection_loss
        }
        loss = 0

        # accumulate the weighted loss terms
        for i in losses:
            loss = loss + torch.mean(losses[i])

        if phase == 'train':
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        return loss
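The opts object only needs the three scalar loss weights referenced above. A minimal sketch using argparse (the default values are illustrative, not taken from the source):

    import argparse

    parser = argparse.ArgumentParser()
    # weights applied to the three loss terms returned by model.calc_loss
    parser.add_argument('--push_loss', type=float, default=1e-3)
    parser.add_argument('--pull_loss', type=float, default=1e-3)
    parser.add_argument('--detection_loss', type=float, default=1.0)
    opts = parser.parse_args([])  # empty list: use the defaults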
Example #4
from torch.nn import DataParallel


def test_func(model, **inputs):
    for i in inputs:
        inputs[i] = make_input(inputs[i])
    # switch to eval mode and wrap for multi-GPU inference
    net = DataParallel(model.eval().cuda())
    out = {}
    result = net(**inputs)
    if not isinstance(result, (list, tuple)):
        result = [result]
    out['preds'] = [make_output(i) for i in result]
    return out
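A typical call site, assuming the batch is keyed to match the network's forward() parameters (the 'imgs' key and batch_imgs name are just examples):

    # run one inference batch and unpack the CPU predictions
    out = test_func(model, imgs=batch_imgs)
    preds = out['preds']  # list of numpy arrays, one per network output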
Example #5
    def make_train(batch_id, config, phase, **inputs):
        # wrap each input as a CUDA tensor (make_input also handles requires_grad)
        for i in inputs:
            inputs[i] = make_input(inputs[i])

        net = config['inference']['net']
        config['batch_id'] = batch_id

        # set the network to train or evaluation mode depending on the phase
        if phase == 'train':
            net = net.train()
        else:
            net = net.eval()
        # anything other than 'inference' (i.e. 'train' or 'valid') computes losses
        if phase != 'inference':
            # the images go in positionally; every other input
            # (e.g. ground-truth targets) is passed as a keyword argument
            result = net(inputs['imgs'], **{i: inputs[i] for i in inputs if i != 'imgs'})

            num_loss = len(config['train']['loss'])

            "I use the last outputs as the loss"
            "the weights of the loss are controlled by config['train']['loss'] "
            losses = {i[0]: result[-num_loss + idx]*i[1] for idx, i in enumerate(config['train']['loss'])}

            loss = 0
            # build a log line recording the per-term losses for this batch
            toprint = '\n{}: '.format(batch_id)
            for i in losses:
                loss = loss + torch.mean(losses[i])

                my_loss = make_output(losses[i])
                my_loss = my_loss.mean(axis=0)

                if my_loss.size == 1:
                    toprint += ' {}: {}'.format(i, format(my_loss.mean(), '.8f'))
                else:
                    toprint += '\n{}'.format(i)
                    for j in my_loss:
                        toprint += ' {}'.format(format(j.mean(), '.8f'))

            logger.write(toprint)
            logger.flush()

            optimizer = train_cfg['optimizer']  # train_cfg is assumed to be set in the enclosing scope

            if batch_id == 200000:
                ## decrease the learning rate after 200000 iterations
                for param_group in optimizer.param_groups:
                    param_group['lr'] = 1e-5

            if phase == 'train':
                # zero the gradients before backpropagation: PyTorch accumulates
                # gradients across backward passes, which suits RNNs but is rarely
                # what you want for CNNs
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            return None
        else:
            # inference path, used by test.py: run the network and
            # return the predictions as CPU arrays
            out = {}
            net = net.eval()
            result = net(**inputs)
            if not isinstance(result, (list, tuple)):
                result = [result]
            out['preds'] = [make_output(i) for i in result]
            return out
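The zero_grad() comment above is easy to verify: PyTorch sums gradients across backward passes until they are explicitly cleared. A self-contained demonstration:

    import torch

    w = torch.ones(1, requires_grad=True)
    loss = (w * 2).sum()

    loss.backward(retain_graph=True)
    print(w.grad)   # tensor([2.])

    loss.backward(retain_graph=True)
    print(w.grad)   # tensor([4.])  <- gradients accumulated

    w.grad.zero_()  # what optimizer.zero_grad() does for every parameter
    loss.backward()
    print(w.grad)   # tensor([2.])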