from collections import OrderedDict
from typing import Tuple, Union, Optional

import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import math

import timm
import torchgeo.models
from torchgeo.models import ResNet18_Weights, ResNet50_Weights, ViTSmall16_Weights
from location_encoder import get_positional_encoding, get_neural_network, LocationEncoder
from datamodules.s2geo_dataset import S2Geo

class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()

        # all conv layers have stride 1; an avgpool is performed after the second convolution when stride > 1
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu1 = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu2 = nn.ReLU(inplace=True)

        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()

        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu3 = nn.ReLU(inplace=True)

        self.downsample = None
        self.stride = stride

        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # the downsampling path is prepended with an avgpool, and the subsequent convolution has stride 1
            self.downsample = nn.Sequential(OrderedDict([
                ("-1", nn.AvgPool2d(stride)),
                ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
                ("1", nn.BatchNorm2d(planes * self.expansion))
            ]))

    def forward(self, x: torch.Tensor):
        identity = x

        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.relu2(self.bn2(self.conv2(out)))
        out = self.avgpool(out)
        out = self.bn3(self.conv3(out))

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu3(out)
        return out


class AttentionPool2d(nn.Module):
    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
        super().__init__()
        self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads

    def forward(self, x):
        x = x.flatten(start_dim=2).permute(2, 0, 1)  # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC, mean token prepended as the query
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        x, _ = F.multi_head_attention_forward(
            query=x[:1], key=x, value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False
        )
        return x.squeeze(0)


class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1.
    - The final pooling layer is a QKV attention instead of an average pool.
    A shape sketch is given after the class definition.
    """

    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64, in_channels=3):
        super().__init__()
        self.output_dim = output_dim
        self.input_resolution = input_resolution

        # the 3-layer stem
        self.conv1 = nn.Conv2d(in_channels, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.relu3 = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(2)

        # residual layers
        self._inplanes = width  # mutable variable used only during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)

        embed_dim = width * 32  # the ResNet feature dimension
        self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)

    def _make_layer(self, planes, blocks, stride=1):
        layers = [Bottleneck(self._inplanes, planes, stride)]

        self._inplanes = planes * Bottleneck.expansion
        for _ in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        def stem(x):
            x = self.relu1(self.bn1(self.conv1(x)))
            x = self.relu2(self.bn2(self.conv2(x)))
            x = self.relu3(self.bn3(self.conv3(x)))
            x = self.avgpool(x)
            return x

        x = x.type(self.conv1.weight.dtype)
        x = stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.attnpool(x)

        return x

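# Shape sketch for ModifiedResNet (illustrative, derived from the layer definitions above;
# B = batch size, R = input_resolution):
#
#   input x:                                       (B, in_channels, R, R)
#   after the stem (stride-2 conv1 + 2x2 avgpool): (B, width, R/4, R/4)
#   after layer4 (layers 2-4 each stride 2):       (B, width * 32, R/32, R/32)
#   after attnpool:                                (B, output_dim)

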
class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""

    def forward(self, x: torch.Tensor):
        orig_type = x.dtype
        ret = super().forward(x.type(torch.float32))
        return ret.type(orig_type)


class QuickGELU(nn.Module):
    def forward(self, x: torch.Tensor):
        return x * torch.sigmoid(1.702 * x)


class ResidualAttentionBlock(nn.Module):
    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
        super().__init__()

        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, d_model * 4)),
            ("gelu", QuickGELU()),
            ("c_proj", nn.Linear(d_model * 4, d_model))
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask

    def attention(self, x: torch.Tensor):
        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]

    def forward(self, x: torch.Tensor):
        x = x + self.attention(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x


class Transformer(nn.Module):
    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.width = width
        self.layers = layers
        self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])

    def forward(self, x: torch.Tensor):
        return self.resblocks(x)


class VisionTransformer(nn.Module):
    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, in_channels: int, output_dim: int):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)

        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
        self.ln_pre = LayerNorm(width)

        self.transformer = Transformer(width, layers, heads)

        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))

    def forward(self, x: torch.Tensor):
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)

        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD

        x = self.ln_post(x[:, 0, :])

        if self.proj is not None:
            x = x @ self.proj

        return x


class SatCLIP(nn.Module):
    def __init__(self,
                 embed_dim: int,
                 # vision
                 image_resolution: int,
                 vision_layers: Union[Tuple[int, int, int, int], int, str],
                 vision_width: int,
                 vision_patch_size: int,
                 in_channels: int,
                 # location
                 le_type: str,
                 pe_type: str,
                 frequency_num: int,
                 max_radius: int,
                 min_radius: int,
                 harmonics_calculation: str,
                 legendre_polys: int = 10,
                 sh_embedding_dims: int = 16,
                 ffn: bool = True,
                 num_hidden_layers: int = 2,
                 capacity: int = 256,
                 *args,
                 **kwargs
                 ):
        super().__init__()

        if isinstance(vision_layers, (tuple, list)):
            print('using modified resnet')
            vision_heads = vision_width * 32 // 64
            self.visual = ModifiedResNet(
                layers=vision_layers,
                output_dim=embed_dim,
                heads=vision_heads,
                input_resolution=image_resolution,
                width=vision_width,
                in_channels=in_channels
            )

        elif vision_layers == 'moco_resnet18':
            print('using pretrained moco resnet18')
            weights = ResNet18_Weights.SENTINEL2_ALL_MOCO
            in_chans = weights.meta["in_chans"]
            self.visual = timm.create_model("resnet18", in_chans=in_chans, num_classes=embed_dim)
            self.visual.load_state_dict(weights.get_state_dict(progress=True), strict=False)
            self.visual.requires_grad_(False)
            self.visual.fc.requires_grad_(True)  # freeze the backbone; train only the final projection

        elif vision_layers == 'moco_resnet50':
            print('using pretrained moco resnet50')
            weights = ResNet50_Weights.SENTINEL2_ALL_MOCO
            in_chans = weights.meta["in_chans"]
            self.visual = timm.create_model("resnet50", in_chans=in_chans, num_classes=embed_dim)
            self.visual.load_state_dict(weights.get_state_dict(progress=True), strict=False)
            self.visual.requires_grad_(False)
            self.visual.fc.requires_grad_(True)  # freeze the backbone; train only the final projection

        elif vision_layers == 'moco_vit16':
            print('using pretrained moco vit16')
            weights = ViTSmall16_Weights.SENTINEL2_ALL_MOCO
            in_chans = weights.meta["in_chans"]
            self.visual = timm.create_model("vit_small_patch16_224", in_chans=in_chans, num_classes=embed_dim)
            self.visual.load_state_dict(weights.get_state_dict(progress=True), strict=False)
            self.visual.requires_grad_(False)
            self.visual.head.requires_grad_(True)  # freeze the backbone; train only the final projection

        else:
            print('using vision transformer')
            vision_heads = vision_width // 64
            self.visual = VisionTransformer(
                input_resolution=image_resolution,
                patch_size=vision_patch_size,
                width=vision_width,
                layers=vision_layers,
                heads=vision_heads,
                output_dim=embed_dim,
                in_channels=in_channels
            )

        # location encoder: positional encoding followed by a small neural network
        self.posenc = get_positional_encoding(name=le_type, harmonics_calculation=harmonics_calculation, legendre_polys=legendre_polys, min_radius=min_radius, max_radius=max_radius, frequency_num=frequency_num).double()
        self.nnet = get_neural_network(name=pe_type, input_dim=self.posenc.embedding_dim, num_classes=embed_dim, dim_hidden=capacity, num_layers=num_hidden_layers).double()
        self.location = LocationEncoder(self.posenc,
                                        self.nnet
                                        ).double()

        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))

        self.initialize_parameters()

    def initialize_parameters(self):
        if isinstance(self.visual, ModifiedResNet):
            if self.visual.attnpool is not None:
                std = self.visual.attnpool.c_proj.in_features ** -0.5
                nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)

            # zero-init the final BatchNorm gain in each bottleneck, so residual blocks start as identity
            for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
                for name, param in resnet_block.named_parameters():
                    if name.endswith("bn3.weight"):
                        nn.init.zeros_(param)

    @property
    def dtype(self):
        if isinstance(self.visual, timm.models.vision_transformer.VisionTransformer):
            return self.visual.patch_embed.proj.weight.dtype
        else:
            return self.visual.conv1.weight.dtype

    def encode_image(self, image):
        return self.visual(image.type(self.dtype))

    def encode_location(self, coords):
        return self.location(coords.double())

    def forward(self, image, coords):
        image_features = self.encode_image(image)
        location_features = self.encode_location(coords).float()

        # normalized features
        image_features = image_features / image_features.norm(dim=1, keepdim=True)
        location_features = location_features / location_features.norm(dim=1, keepdim=True)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ location_features.t()
        logits_per_location = logits_per_image.t()

        # shape = [batch_size, batch_size]
        return logits_per_image, logits_per_location

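# Illustrative construction of a SatCLIP model (a sketch, not the training configuration;
# the ``le_type``, ``pe_type``, and ``harmonics_calculation`` strings are assumptions and must
# match names registered in ``location_encoder``; the numeric values are placeholders):
#
#   model = SatCLIP(
#       embed_dim=256,
#       image_resolution=224,
#       vision_layers=(3, 4, 6, 3),        # tuple/list -> ModifiedResNet backbone
#       vision_width=64,
#       vision_patch_size=16,              # only used by the VisionTransformer branch
#       in_channels=13,                    # e.g. all Sentinel-2 bands
#       le_type="sphericalharmonics",      # assumed identifier
#       pe_type="siren",                   # assumed identifier
#       frequency_num=16,
#       max_radius=360,
#       min_radius=1,
#       harmonics_calculation="analytic",  # assumed identifier
#   )
#   logits_per_image, logits_per_location = model(images, coords)  # coords: (B, 2) location coordinates

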
def convert_weights(model: nn.Module):
    """Convert applicable model parameters to fp16"""

    def _convert_weights_to_fp16(l):
        if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            l.weight.data = l.weight.data.half()
            if l.bias is not None:
                l.bias.data = l.bias.data.half()

        if isinstance(l, nn.MultiheadAttention):
            for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
                tensor = getattr(l, attr)
                if tensor is not None:
                    tensor.data = tensor.data.half()

        for name in ["text_projection", "proj"]:
            if hasattr(l, name):
                attr = getattr(l, name)
                if attr is not None:
                    attr.data = attr.data.half()

    model.apply(_convert_weights_to_fp16)
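

# Note (illustrative): ``convert_weights`` mutates a model in place. It halves the weights of
# Conv1d/Conv2d/Linear and nn.MultiheadAttention modules (plus any ``proj``/``text_projection``
# attribute) while leaving normalization layers in fp32, mirroring CLIP's mixed-precision scheme:
#
#   model = SatCLIP(...)       # as sketched above
#   convert_weights(model)     # encode_image() then runs the visual tower in half precision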