# from https://github.com/mseitzer/pytorch-fid
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from scipy import linalg
from torch.nn.functional import adaptive_avg_pool2d

try:
    from torchvision.models.utils import load_state_dict_from_url
except ImportError:
    from torch.utils.model_zoo import load_url as load_state_dict_from_url

FID_WEIGHTS_URL = (
    "https://github.com/mseitzer/pytorch-fid/releases/download/"
    + "fid_weights/pt_inception-2015-12-05-6726825d.pth"
)


class InceptionV3(nn.Module):
    """Pretrained InceptionV3 network returning feature maps"""

    # Index of default block of inception to return,
    # corresponds to output of final average pooling
    DEFAULT_BLOCK_INDEX = 3

    # Maps feature dimensionality to their output blocks indices
    BLOCK_INDEX_BY_DIM = {
        64: 0,  # First max pooling features
        192: 1,  # Second max pooling features
        768: 2,  # Pre-aux classifier features
        2048: 3,  # Final average pooling features
    }

    def __init__(
        self,
        output_blocks=(DEFAULT_BLOCK_INDEX,),  # tuple to avoid a mutable default
        resize_input=True,
        normalize_input=True,
        requires_grad=False,
        use_fid_inception=True,
    ):
        """Build pretrained InceptionV3

        Parameters
        ----------
        output_blocks : sequence of int
            Indices of blocks to return features of. Possible values are:
            - 0: corresponds to output of first max pooling
            - 1: corresponds to output of second max pooling
            - 2: corresponds to output which is fed to aux classifier
            - 3: corresponds to output of final average pooling
        resize_input : bool
            If true, bilinearly resizes input to width and height 299 before
            feeding input to model. As the network without fully connected
            layers is fully convolutional, it should be able to handle inputs
            of arbitrary size, so resizing might not be strictly needed
        normalize_input : bool
            If true, scales the input from range (0, 1) to the range the
            pretrained Inception network expects, namely (-1, 1)
        requires_grad : bool
            If true, parameters of the model require gradients. Possibly useful
            for finetuning the network
        use_fid_inception : bool
            If true, uses the pretrained Inception model used in Tensorflow's
            FID implementation. If false, uses the pretrained Inception model
            available in torchvision. The FID Inception model has different
            weights and a slightly different structure from torchvision's
            Inception model. If you want to compute FID scores, you are
            strongly advised to set this parameter to true to get comparable
            results.
        """
        super(InceptionV3, self).__init__()

        self.resize_input = resize_input
        self.normalize_input = normalize_input
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)

        assert self.last_needed_block <= 3, "Last possible output block index is 3"

        self.blocks = nn.ModuleList()

        if use_fid_inception:
            inception = fid_inception_v3()
        else:
            inception = _inception_v3(pretrained=True)

        # Block 0: input to maxpool1
        block0 = [
            inception.Conv2d_1a_3x3,
            inception.Conv2d_2a_3x3,
            inception.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        self.blocks.append(nn.Sequential(*block0))

        # Block 1: maxpool1 to maxpool2
        if self.last_needed_block >= 1:
            block1 = [
                inception.Conv2d_3b_1x1,
                inception.Conv2d_4a_3x3,
                nn.MaxPool2d(kernel_size=3, stride=2),
            ]
            self.blocks.append(nn.Sequential(*block1))

        # Block 2: maxpool2 to aux classifier
        if self.last_needed_block >= 2:
            block2 = [
                inception.Mixed_5b,
                inception.Mixed_5c,
                inception.Mixed_5d,
                inception.Mixed_6a,
                inception.Mixed_6b,
                inception.Mixed_6c,
                inception.Mixed_6d,
                inception.Mixed_6e,
            ]
            self.blocks.append(nn.Sequential(*block2))

        # Block 3: aux classifier to final avgpool
        if self.last_needed_block >= 3:
            block3 = [
                inception.Mixed_7a,
                inception.Mixed_7b,
                inception.Mixed_7c,
                nn.AdaptiveAvgPool2d(output_size=(1, 1)),
            ]
            self.blocks.append(nn.Sequential(*block3))

        for param in self.parameters():
            param.requires_grad = requires_grad

    def forward(self, inp):
        """Get Inception feature maps

        Parameters
        ----------
        inp : torch.Tensor
            Input tensor of shape Bx3xHxW. Values are expected to be in
            range (0, 1)

        Returns
        -------
        List of torch.Tensor, one per selected output block,
        sorted ascending by block index
        """
        outp = []
        x = inp

        if self.resize_input:
            x = F.interpolate(x, size=(299, 299), mode="bilinear", align_corners=False)

        if self.normalize_input:
            x = 2 * x - 1  # Scale from range (0, 1) to range (-1, 1)

        for idx, block in enumerate(self.blocks):
            x = block(x)
            if idx in self.output_blocks:
                outp.append(x)

            if idx == self.last_needed_block:
                break

        return outp
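

# Illustrative usage sketch (an editor's addition, not part of the upstream
# pytorch-fid code): how the wrapper above is typically called to extract
# pool3 features. Names and shapes assume the default 2048-dim block; the
# dummy batch is fabricated for illustration only.
def _demo_inception_features():
    # downloads the FID weights on first use (use_fid_inception=True default)
    model = InceptionV3([InceptionV3.BLOCK_INDEX_BY_DIM[2048]])
    model.eval()
    images = torch.rand(8, 3, 299, 299)  # dummy batch, values in (0, 1)
    with torch.no_grad():
        features = model(images)[0]  # one tensor per requested block
    return features.squeeze(3).squeeze(2)  # shape (8, 2048)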


def _inception_v3(*args, **kwargs):
    """Wraps `torchvision.models.inception_v3`

    Skips default weight initialization if supported by torchvision version.
    See https://github.com/mseitzer/pytorch-fid/issues/28.
    """
    try:
        version = tuple(map(int, torchvision.__version__.split(".")[:2]))
    except ValueError:
        # Just a caution against weird version strings
        version = (0,)

    if version >= (0, 6):
        kwargs["init_weights"] = False

    return torchvision.models.inception_v3(*args, **kwargs)


def fid_inception_v3():
    """Build pretrained Inception model for FID computation

    The Inception model for FID computation uses a different set of weights
    and has a slightly different structure than torchvision's Inception.

    This method first constructs torchvision's Inception and then patches the
    necessary parts that are different in the FID Inception model.
    """
    inception = _inception_v3(num_classes=1008, aux_logits=False, pretrained=False)
    inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
    inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
    inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
    inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
    inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
    inception.Mixed_7b = FIDInceptionE_1(1280)
    inception.Mixed_7c = FIDInceptionE_2(2048)

    state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)
    inception.load_state_dict(state_dict)
    return inception
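

# Quick sanity sketch (an editor's addition, not upstream code): after
# patching and loading, the mixed blocks should be instances of the
# FID-specific classes defined below rather than torchvision's originals.
def _demo_fid_inception_patch():
    net = fid_inception_v3()  # downloads the FID weights on first call
    assert isinstance(net.Mixed_5b, FIDInceptionA)
    assert isinstance(net.Mixed_7c, FIDInceptionE_2)
    return net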


class FIDInceptionA(torchvision.models.inception.InceptionA):
    """InceptionA block patched for FID computation"""

    def __init__(self, in_channels, pool_features):
        super(FIDInceptionA, self).__init__(in_channels, pool_features)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

        # Patch: Tensorflow's average pool does not use the padded zeros in
        # its average calculation
        branch_pool = F.avg_pool2d(
            x, kernel_size=3, stride=1, padding=1, count_include_pad=False
        )
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)


class FIDInceptionC(torchvision.models.inception.InceptionC):
    """InceptionC block patched for FID computation"""

    def __init__(self, in_channels, channels_7x7):
        super(FIDInceptionC, self).__init__(in_channels, channels_7x7)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch7x7 = self.branch7x7_1(x)
        branch7x7 = self.branch7x7_2(branch7x7)
        branch7x7 = self.branch7x7_3(branch7x7)

        branch7x7dbl = self.branch7x7dbl_1(x)
        branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)

        # Patch: Tensorflow's average pool does not use the padded zeros in
        # its average calculation
        branch_pool = F.avg_pool2d(
            x, kernel_size=3, stride=1, padding=1, count_include_pad=False
        )
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
        return torch.cat(outputs, 1)


class FIDInceptionE_1(torchvision.models.inception.InceptionE):
    """First InceptionE block patched for FID computation"""

    def __init__(self, in_channels):
        super(FIDInceptionE_1, self).__init__(in_channels)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3),
        ]
        branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a(branch3x3dbl),
            self.branch3x3dbl_3b(branch3x3dbl),
        ]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)

        # Patch: Tensorflow's average pool does not use the padded zeros in
        # its average calculation
        branch_pool = F.avg_pool2d(
            x, kernel_size=3, stride=1, padding=1, count_include_pad=False
        )
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)


class FIDInceptionE_2(torchvision.models.inception.InceptionE):
    """Second InceptionE block patched for FID computation"""

    def __init__(self, in_channels):
        super(FIDInceptionE_2, self).__init__(in_channels)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3),
        ]
        branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a(branch3x3dbl),
            self.branch3x3dbl_3b(branch3x3dbl),
        ]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)

        # Patch: The FID Inception model uses max pooling instead of average
        # pooling. This is likely an error in this specific Inception
        # implementation, as other Inception models use average pooling here
        # (which matches the description in the paper).
        branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)


def compute_val_fid(trainer, verbose=0):
    """
    Compute the FID score between the n = opts.train.fid.n_images real images
    from the validation set (rf domain) and the n fake images painted from
    those same validation images

    Args:
        trainer (climategan.Trainer): trainer to compute the val fid for
        verbose (int, optional): print level. Defaults to 0.

    Returns:
        float: FID score
    """
    # get opts params
    batch_size = trainer.opts.train.fid.get("batch_size", 50)
    dims = trainer.opts.train.fid.get("dims", 2048)

    # set inception model
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
    model = InceptionV3([block_idx]).to(trainer.device)

    # first fid computation: compute the real stats, only once
    if trainer.real_val_fid_stats is None:
        if verbose > 0:
            print("Computing real_val_fid_stats for the first time")
        set_real_val_fid_stats(trainer, model, batch_size, dims)

    # get real stats
    real_m = trainer.real_val_fid_stats["m"]
    real_s = trainer.real_val_fid_stats["s"]

    # compute fake images
    fakes = compute_fakes(trainer)
    if verbose > 0:
        print("Computing fake activation statistics")

    # get fake stats
    fake_m, fake_s = calculate_activation_statistics(
        fakes, model, batch_size=batch_size, dims=dims, device=trainer.device
    )

    # compute FD between the real and the fake inception stats
    return calculate_frechet_distance(real_m, real_s, fake_m, fake_s)
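

# Hedged sketch (an editor's addition): the same pipeline as compute_val_fid
# but for two plain image tensors, without the climategan trainer. It shows
# how the helpers in this file fit together; `real_images` and `fake_images`
# are hypothetical (B, 3, H, W) tensors with values in (0, 1).
def _demo_fid_between_tensors(real_images, fake_images, device="cpu"):
    model = InceptionV3([InceptionV3.BLOCK_INDEX_BY_DIM[2048]]).to(device)
    real_m, real_s = calculate_activation_statistics(real_images, model, device=device)
    fake_m, fake_s = calculate_activation_statistics(fake_images, model, device=device)
    return calculate_frechet_distance(real_m, real_s, fake_m, fake_s)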


def set_real_val_fid_stats(trainer, model, batch_size, dims):
    """
    Sets the real_val_fid_stats attribute of the trainer with the m and
    s outputs of calculate_activation_statistics on the real data.
    This needs to be done only once since nothing changes during training here.

    Args:
        trainer (climategan.Trainer): trainer instance to compute the stats for
        model (InceptionV3): inception model to get the activations from
        batch_size (int): inception inference batch size
        dims (int): dimension selected in the model
    """
    # in the rf domain display_size may be different from fid.n_images
    limit = trainer.opts.train.fid.n_images
    display_x = torch.stack(
        [sample["data"]["x"] for sample in trainer.display_images["val"]["rf"][:limit]]
    ).to(trainer.device)
    m, s = calculate_activation_statistics(
        display_x, model, batch_size=batch_size, dims=dims, device=trainer.device
    )
    trainer.real_val_fid_stats = {"m": m, "s": s}


def compute_fakes(trainer, verbose=0):
    """
    Compute current fake inferences

    Args:
        trainer (climategan.Trainer): trainer instance
        verbose (int, optional): Print level. Defaults to 0.

    Returns:
        torch.Tensor: trainer.opts.train.fid.n_images painted images
    """
    # in the rf domain display_size may be different from fid.n_images
    n = trainer.opts.train.fid.n_images
    bs = trainer.opts.data.loaders.batch_size

    display_batches = [
        (sample["data"]["x"], sample["data"]["m"])
        for sample in trainer.display_images["val"]["rf"][:n]
    ]
    # stack images (b[0]) and masks (b[1]) separately
    display_x = torch.stack([b[0] for b in display_batches]).to(trainer.device)
    display_m = torch.stack([b[1] for b in display_batches]).to(trainer.device)
    # ceiling division so an exact multiple of bs does not yield an empty batch
    nbs = (len(display_x) + bs - 1) // bs

    fakes = []
    for b in range(nbs):
        if verbose > 0:
            print("computing fakes {}/{}".format(b + 1, nbs), end="\r", flush=True)
        with torch.no_grad():
            x = display_x[b * bs : (b + 1) * bs]
            m = display_m[b * bs : (b + 1) * bs]
            fake = trainer.G.paint(m, x)
            fakes.append(fake)

    return torch.cat(fakes, dim=0)


def calculate_activation_statistics(
    images, model, batch_size=50, dims=2048, device="cpu"
):
    """Calculation of the statistics used by the FID.

    Params:
    -- images     : List of images
    -- model      : Instance of inception model
    -- batch_size : The images numpy array is split into batches with
                    batch size batch_size. A reasonable batch size
                    depends on the hardware.
    -- dims       : Dimensionality of features returned by Inception
    -- device     : Device to run calculations

    Returns:
    -- mu    : The mean over samples of the activations of the pool_3 layer of
               the inception model.
    -- sigma : The covariance matrix of the activations of the pool_3 layer of
               the inception model.
    """
    act = get_activations(images, model, batch_size, dims, device)
    mu = np.mean(act, axis=0)
    sigma = np.cov(act, rowvar=False)
    return mu, sigma
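

# Worked micro-example (an editor's addition): mu and sigma are simply the
# sample mean and covariance of the (N, dims) activation matrix returned by
# get_activations, computed here on a small fabricated matrix to show the
# shapes involved.
def _demo_statistics_shapes():
    act = np.arange(12, dtype=np.float64).reshape(4, 3)  # 4 samples, dims=3
    mu, sigma = np.mean(act, axis=0), np.cov(act, rowvar=False)
    assert mu.shape == (3,) and sigma.shape == (3, 3)
    return mu, sigma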


def get_activations(images, model, batch_size=50, dims=2048, device="cpu"):
    """Calculates the activations of the pool_3 layer for all images.

    Params:
    -- images     : List of images
    -- model      : Instance of inception model
    -- batch_size : Batch size of images for the model to process at once.
                    Unlike the original FID implementation, a final partial
                    batch is processed rather than ignored, so the number of
                    samples does not need to be a multiple of the batch size.
    -- dims       : Dimensionality of features returned by Inception
    -- device     : Device to run calculations

    Returns:
    -- A numpy array of dimension (num images, dims) that contains the
       activations of the given tensor when feeding inception with the
       query tensor.
    """
    model.eval()

    pred_arr = np.empty((len(images), dims))
    start_idx = 0
    nbs = len(images) // batch_size + 1
    for b in range(nbs):
        batch = images[b * batch_size : (b + 1) * batch_size].to(device)
        # an exact multiple of batch_size produces one final empty slice
        if not batch.nelement():
            continue

        with torch.no_grad():
            pred = model(batch)[0]

        # If model output is not scalar, apply global spatial average pooling.
        # This happens if you choose a dimensionality not equal 2048.
        if pred.size(2) != 1 or pred.size(3) != 1:
            pred = adaptive_avg_pool2d(pred, output_size=(1, 1))

        pred = pred.squeeze(3).squeeze(2).cpu().numpy()

        pred_arr[start_idx : start_idx + pred.shape[0]] = pred
        start_idx = start_idx + pred.shape[0]

    return pred_arr
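

# Illustrative sketch (an editor's addition): because the loop above slices
# past the end and skips the empty remainder batch, the image count does not
# need to be a multiple of batch_size. The random batch is fabricated.
def _demo_get_activations():
    model = InceptionV3([InceptionV3.BLOCK_INDEX_BY_DIM[2048]])
    images = torch.rand(7, 3, 299, 299)  # 7 is deliberately not a multiple of 4
    act = get_activations(images, model, batch_size=4, dims=2048)
    assert act.shape == (7, 2048)
    return act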


def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.

    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
    and X_2 ~ N(mu_2, C_2) is

        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).

    Stable version by Dougal J. Sutherland.

    Params:
    -- mu1    : The sample mean over activations of a layer of the inception
                net (like returned by the function 'get_activations') for
                generated samples.
    -- mu2    : The sample mean over activations, precalculated on a
                representative data set.
    -- sigma1 : The covariance matrix over activations for generated samples.
    -- sigma2 : The covariance matrix over activations, precalculated on a
                representative data set.

    Returns:
    -- The Frechet Distance.
    """
    mu1 = np.atleast_1d(mu1)
    mu2 = np.atleast_1d(mu2)

    sigma1 = np.atleast_2d(sigma1)
    sigma2 = np.atleast_2d(sigma2)

    assert (
        mu1.shape == mu2.shape
    ), "Training and test mean vectors have different lengths"
    assert (
        sigma1.shape == sigma2.shape
    ), "Training and test covariances have different dimensions"

    diff = mu1 - mu2

    # Product might be almost singular
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        msg = (
            "fid calculation produces singular product; "
            "adding %s to diagonal of cov estimates"
        ) % eps
        print(msg)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

    # Numerical error might give slight imaginary component
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            m = np.max(np.abs(covmean.imag))
            raise ValueError("Imaginary component {}".format(m))
        covmean = covmean.real

    tr_covmean = np.trace(covmean)

    return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
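

# Numerical sanity checks (an editor's addition): the distance between a
# Gaussian and itself is 0 up to numerical error, and shifting every mean
# coordinate by 1 in d dimensions adds exactly d to the squared distance.
def _demo_frechet_distance_sanity_checks():
    rng = np.random.default_rng(0)
    act = rng.standard_normal((100, 8))
    mu, sigma = act.mean(axis=0), np.cov(act, rowvar=False)
    d_same = calculate_frechet_distance(mu, sigma, mu, sigma)  # ~0
    d_shift = calculate_frechet_distance(mu + 1.0, sigma, mu, sigma)  # ~8
    return d_same, d_shift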