import torch
import torch.nn as nn
import torch.nn.functional as F

from .submodule import *


class BasicMotionEncoder(nn.Module):
    """Encodes the current disparity estimate and sampled correlation
    features into a 128-channel motion feature map."""

    def __init__(self):
        super(BasicMotionEncoder, self).__init__()
        self.corr_levels = 2
        self.corr_radius = 4

        # Channel count of the sampled correlation volume.
        cor_planes = 2 * self.corr_levels * (2 * self.corr_radius + 1)

        self.convc1 = nn.Conv2d(cor_planes, 64, 1, padding=0)
        self.convc2 = nn.Conv2d(64, 64, 3, padding=1)
        self.convd1 = nn.Conv2d(1, 64, 7, padding=3)
        self.convd2 = nn.Conv2d(64, 64, 3, padding=1)
        # 128-1 output channels so that concatenating the raw disparity
        # below yields exactly 128 channels.
        self.conv = nn.Conv2d(64 + 64, 128 - 1, 3, padding=1)

    def forward(self, disp, corr):
        # Separate branches for correlation features and disparity.
        cor = F.relu(self.convc1(corr))
        cor = F.relu(self.convc2(cor))
        disp_ = F.relu(self.convd1(disp))
        disp_ = F.relu(self.convd2(disp_))

        cor_disp = torch.cat([cor, disp_], dim=1)
        out = F.relu(self.conv(cor_disp))
        # Append the raw disparity so the update operator sees it directly.
        return torch.cat([out, disp], dim=1)
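

# A minimal shape sketch (batch and spatial sizes are illustrative; the 36
# correlation channels follow from corr_levels=2, corr_radius=4 above):
#
#   enc = BasicMotionEncoder()
#   disp = torch.zeros(1, 1, 64, 128)    # disparity at 1/4 resolution
#   corr = torch.zeros(1, 36, 64, 128)   # 2 * 2 * (2*4 + 1) = 36 channels
#   feat = enc(disp, corr)               # -> (1, 128, 64, 128)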


class ConvGRU(nn.Module):
    """GRU cell with 2D convolutions in place of matrix multiplies."""

    def __init__(self, hidden_dim, input_dim, kernel_size=3):
        super(ConvGRU, self).__init__()
        self.convz = nn.Conv2d(hidden_dim + input_dim, hidden_dim, kernel_size, padding=kernel_size // 2)
        self.convr = nn.Conv2d(hidden_dim + input_dim, hidden_dim, kernel_size, padding=kernel_size // 2)
        self.convq = nn.Conv2d(hidden_dim + input_dim, hidden_dim, kernel_size, padding=kernel_size // 2)

    def forward(self, h, *x_list):
        # Extra inputs are concatenated along the channel dimension, so their
        # channels must sum to input_dim.
        x = torch.cat(x_list, dim=1)
        hx = torch.cat([h, x], dim=1)

        z = torch.sigmoid(self.convz(hx))  # update gate
        r = torch.sigmoid(self.convr(hx))  # reset gate
        q = torch.tanh(self.convq(torch.cat([r * h, x], dim=1)))  # candidate state

        h = (1 - z) * h + z * q
        return h
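

# This is the standard GRU update with convolutional gates:
#
#   z = sigmoid(Wz * [h, x]),   r = sigmoid(Wr * [h, x])
#   q = tanh(Wq * [r*h, x]),    h' = (1 - z)*h + z*q
#
# A minimal usage sketch (dimensions illustrative, not from this file):
#
#   gru = ConvGRU(hidden_dim=128, input_dim=192)
#   h = torch.zeros(1, 128, 32, 64)
#   x1 = torch.zeros(1, 128, 32, 64)
#   x2 = torch.zeros(1, 64, 32, 64)
#   h = gru(h, x1, x2)   # input channels 128 + 64 = 192; h stays (1, 128, 32, 64)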


class DispHead(nn.Module):
    """Two-layer conv head that regresses a disparity update from the
    finest hidden state."""

    def __init__(self, input_dim=128, hidden_dim=256, output_dim=1):
        super(DispHead, self).__init__()
        self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1)
        self.conv2 = nn.Conv2d(hidden_dim, output_dim, 3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.conv2(self.relu(self.conv1(x)))
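

# A minimal sketch (shapes illustrative):
#
#   head = DispHead(input_dim=128, hidden_dim=256, output_dim=1)
#   delta = head(torch.zeros(1, 128, 64, 128))   # -> (1, 1, 64, 128)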


class BasicMultiUpdateBlock(nn.Module):
    """Multi-scale GRU update operator. net[0], net[1] and net[2] hold hidden
    states at 1/4, 1/8 and 1/16 resolution; information flows between scales
    via pool2x (downsampling) and interp (upsampling) from .submodule."""

    def __init__(self, hidden_dims=[]):
        super().__init__()
        self.n_gru_layers = 3   # number of hidden-state resolutions
        self.n_downsample = 2   # disparity is predicted at 1/4 resolution
        self.encoder = BasicMotionEncoder()
        encoder_output_dim = 128  # channels produced by BasicMotionEncoder

        # hidden_dims is ordered coarse-to-fine: [1/16, 1/8, 1/4].
        self.gru04 = ConvGRU(hidden_dims[2], encoder_output_dim + hidden_dims[1] * (self.n_gru_layers > 1))
        self.gru08 = ConvGRU(hidden_dims[1], hidden_dims[0] * (self.n_gru_layers == 3) + hidden_dims[2])
        self.gru16 = ConvGRU(hidden_dims[0], hidden_dims[1])
        self.disp_head = DispHead(hidden_dims[2], hidden_dim=256, output_dim=1)
        factor = 2 ** self.n_downsample  # upsampling factor (unused here)

        self.mask_feat_4 = nn.Sequential(
            nn.Conv2d(hidden_dims[2], 32, 3, padding=1),
            nn.ReLU(inplace=True))

    def forward(self, net, corr=None, disp=None, iter04=True, iter08=True, iter16=True, update=True):
        # Update the hidden states coarse-to-fine; each scale conditions on
        # its neighbouring scales.
        if iter16:
            net[2] = self.gru16(net[2], pool2x(net[1]))
        if iter08:
            if self.n_gru_layers > 2:
                net[1] = self.gru08(net[1], pool2x(net[0]), interp(net[2], net[1]))
            else:
                net[1] = self.gru08(net[1], pool2x(net[0]))
        if iter04:
            # Only the finest GRU sees motion features derived from the
            # current disparity and the correlation lookups.
            motion_features = self.encoder(disp, corr)
            if self.n_gru_layers > 1:
                net[0] = self.gru04(net[0], motion_features, interp(net[1], net[0]))
            else:
                net[0] = self.gru04(net[0], motion_features)

        if not update:
            return net

        delta_disp = self.disp_head(net[0])
        mask_feat_4 = self.mask_feat_4(net[0])
        return net, mask_feat_4, delta_disp
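

# A minimal end-to-end sketch (hidden_dims, shapes, and the behaviour of
# pool2x/interp from .submodule are assumptions for illustration: pool2x is
# taken to halve the spatial size, interp to resize its first argument to
# match the second):
#
#   hidden_dims = [128, 128, 128]            # [1/16, 1/8, 1/4] channel widths
#   block = BasicMultiUpdateBlock(hidden_dims=hidden_dims)
#   net = [torch.zeros(1, 128, 64, 128),     # 1/4 resolution
#          torch.zeros(1, 128, 32, 64),      # 1/8
#          torch.zeros(1, 128, 16, 32)]      # 1/16
#   disp = torch.zeros(1, 1, 64, 128)
#   corr = torch.zeros(1, 36, 64, 128)       # matches cor_planes above
#   net, mask_feat_4, delta_disp = block(net, corr, disp)
#   # delta_disp (1, 1, 64, 128) is the residual the caller adds to disp;
#   # mask_feat_4 (1, 32, 64, 128) feeds the learned upsampling mask.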