Example #1
File: main.py Project: nftqcd/fthmc
import os
from typing import Optional

import joblib
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter

# `logger`, `TrainConfig`, `lfConfig`, and `FieldTransformation` are
# project-local names in nftqcd/fthmc, assumed to be imported in main.py.


def run_fthmc(
        flow: nn.ModuleList,
        config: TrainConfig,
        lfconfig: lfConfig,
        xi: Optional[torch.Tensor] = None,
        nprint: int = 50,
        nplot: int = 10,
        window: int = 1,
        num_trajs: int = 1024,
        **kwargs,
):
    logger.rule(f'Running `ftHMC` using trained flow for {num_trajs} trajs')
    if torch.cuda.is_available():
        flow.to('cuda')

    flow.eval()  # inference only: the trained flow is not updated here

    # Wrap the trained flow in a field transformation used to generate
    # the ftHMC trajectories
    ft = FieldTransformation(flow=flow, config=config, lfconfig=lfconfig)
    logdir = config.logdir
    ftstr = lfconfig.uniquestr()
    fthmcdir = os.path.join(logdir, 'ftHMC', ftstr)
    pdir = os.path.join(fthmcdir, 'plots')
    sdir = os.path.join(fthmcdir, 'summaries')
    writer = SummaryWriter(log_dir=sdir)  # creates sdir and its parents if missing
    history = ft.run(x=xi, nprint=nprint, nplot=nplot, window=window,
                     num_trajs=num_trajs, writer=writer, plotdir=pdir,
                     **kwargs)
    # Persist the sampling history with joblib for later analysis
    histfile = os.path.join(fthmcdir, 'history.z')
    logger.log(f'Saving history to: {histfile}')
    joblib.dump(history, histfile)

    return {'field_transformation': ft, 'history': history}
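
A minimal calling sketch for Example #1. The `flow`, `config`, and `lfconfig` objects are assumed to come from the project's own training and configuration code; only keyword arguments shown in the signature above are used.

# Hypothetical usage sketch: `flow`, `config`, and `lfconfig` must be
# built by nftqcd/fthmc's own training / configuration code.
outputs = run_fthmc(
    flow=flow,          # trained normalizing flow (nn.ModuleList)
    config=config,      # TrainConfig providing `logdir`
    lfconfig=lfconfig,  # lfConfig providing `uniquestr()`
    num_trajs=256,      # shorter run than the default 1024
)
ft = outputs['field_transformation']
history = outputs['history']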
Example #2
import torch
from torch.nn import Module, ModuleList
from torchvision.models import resnet50

# `GramMatrixLoss` is assumed to be defined elsewhere in this project.


class GramDistanceResnet50(Module):
    """Compute Gram-matrix distances between two images after the first
    convolution and after every bottleneck block in ResNet50."""

    def __init__(self):
        super().__init__()
        self.layers = ModuleList(self.prepare_layers())
        self.layers.eval()  # keep the pretrained BatchNorm layers in inference mode
        # One distance per GramMatrixLoss layer in the output
        self.num_gram_layers = sum(1 for layer in self.layers
                                   if isinstance(layer, GramMatrixLoss))

    def prepare_layers(self):
        """Insert a GramMatrixLoss layer after the first convolution and after
        every bottleneck block in the ResNet50 architecture."""
        base_model = resnet50(pretrained=True)  # torchvision >= 0.13 prefers `weights=...`
        layers = [
            base_model.conv1,
            GramMatrixLoss(),
            base_model.bn1,
            base_model.relu,
            base_model.maxpool,
        ]
        # Interleave a GramMatrixLoss after each of ResNet50's 16 bottleneck
        # blocks (3 + 4 + 6 + 3 across layer1..layer4), for 17 loss layers total.
        for stage in (base_model.layer1, base_model.layer2,
                      base_model.layer3, base_model.layer4):
            for block in stage:
                layers.append(block)
                layers.append(GramMatrixLoss())
        return layers

    def forward(self, img1, img2):
        """Calculate the Gram matrix distances between pairs of images

        Parameters
        ----------
        img1: torch.Tensor
            The first batch of images as a tensor with shape (batch, channels, height, width)
        img2: torch.Tensor
            The second batch of images as a tensor with shape (batch, channels, height, width)

        Returns
        -------
        torch.Tensor
            A tensor containing the Gram matrix distances for each example in the batch with shape
            (batch, layers), where layers corresponds to the number of GramMatrixLoss layers.
        """
        if img1.size() != img2.size():
            raise ValueError(
                'Input images to GramDistanceResnet50 must have the same shape!')
        features1 = img1
        features2 = img2

        losses = []
        for layer in self.layers:
            if isinstance(layer, GramMatrixLoss):
                loss = layer(features1, features2)
                losses.append(loss)
            else:
                features1 = layer(features1)
                features2 = layer(features2)
        # Stack per-layer losses along the last dim so the result has shape
        # (batch, num_gram_layers), matching the docstring (this assumes each
        # GramMatrixLoss returns per-example losses of shape (batch,))
        losses = torch.stack(losses, dim=-1)
        return losses
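
A minimal usage sketch for Example #2. The 224x224 input size is an assumption based on the standard ImageNet preprocessing that pretrained ResNet50 weights expect, and random tensors stand in for real, normalized images.

# Hypothetical usage: compare two batches of images.
model = GramDistanceResnet50()
img1 = torch.rand(4, 3, 224, 224)  # batch of 4 RGB images
img2 = torch.rand(4, 3, 224, 224)
with torch.no_grad():  # distances only; no gradients needed
    distances = model(img1, img2)
print(distances.shape)  # (4, model.num_gram_layers) -> (4, 17)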