MHasanUnical committed
Commit 78f4323 · verified · 1 Parent(s): b8ae144

Upload Model.py with huggingface_hub

Files changed (1): Model.py (+261, -0)
Model.py ADDED
import torch
import torch.nn as nn
from torchvision import transforms
import torch.nn.functional as F
from transformers import PreTrainedModel

# VesselSegmentConfig is referenced below (config_class, constructor default)
# but is not defined in this file; it is assumed to come from a companion
# configuration module uploaded alongside Model.py. The module name used here
# is an assumption, not confirmed by the repo.
from Config import VesselSegmentConfig

######################################################################
# IMAGE DOWN SAMPLING
######################################################################
class ImageDownSampling(nn.Module):
    def __init__(self, height, width, scale):
        super().__init__()
        self.resize = transforms.Resize(size=(height // scale, width // scale))

    def forward(self, x):
        return self.resize(x)

######################################################################
# IMAGE SHARPENING
######################################################################
class ImageSharp(nn.Module):
    def __init__(self):
        super(ImageSharp, self).__init__()

    def forward(self, x):
        B, C, H, W = x.shape
        device = x.device
        # Sharpening kernel: basic 3x3
        kernel = torch.tensor([[[[0, -1, 0],
                                 [-1, 5, -1],
                                 [0, -1, 0]]]], dtype=torch.float32, device=device)  # (1, 1, 3, 3)
        # Apply the kernel using group convolution (one group per channel)
        kernel = kernel.repeat(C, 1, 1, 1)  # (C, 1, 3, 3) -- for C=1 this is still (1, 1, 3, 3)

        # Apply convolution; padding=1 keeps the spatial size unchanged
        sharpened = F.conv2d(x, kernel, padding=1, groups=C)

        # Clamp to stay within valid image range
        sharpened = torch.clamp(sharpened, 0, 1)

        return sharpened

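# Note: the clamp to [0, 1] in ImageSharp assumes inputs are already scaled to
# [0, 1]; mean/std-normalized inputs (roughly [-1, 1]) would be truncated here.
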
######################################################################
# IMAGE PATCHING
######################################################################
class ImagePatching(nn.Module):
    def __init__(self, patch_size: int):
        super(ImagePatching, self).__init__()
        self.patch_size = patch_size
        self.image_patch = nn.Unfold(kernel_size=patch_size, stride=patch_size)
        self.image_sharp = ImageSharp()

    def forward(self, x):
        batch_size, channels, height, width = x.shape
        x = self.image_sharp(x)
        x = self.image_patch(x)
        x = x.transpose(1, 2).contiguous()
        x = x.view(-1, height // self.patch_size, width // self.patch_size, channels, self.patch_size, self.patch_size)
        x = x.view(-1, channels, self.patch_size, self.patch_size)
        return x

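# Shape flow through ImagePatching, for reference (p = patch_size,
# L = (H/p) * (W/p) patches per image):
#   input x          (B, C, H, W)
#   after Unfold     (B, C*p*p, L)
#   after transpose  (B, L, C*p*p)
#   after the views  (B*L, C, p, p)   # one tensor row per patch, fed to the encoder
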
######################################################################
# DOUBLE CONVOLUTION LAYER
######################################################################
class DoubleConvLayer(nn.Module):
    def __init__(self, in_feature: int, out_feature: int):
        super(DoubleConvLayer, self).__init__()
        self.double_conv_layer = nn.Sequential(
            nn.Conv2d(in_channels=in_feature, out_channels=out_feature, kernel_size=3, padding=1),
            nn.InstanceNorm2d(num_features=out_feature),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(in_channels=out_feature, out_channels=out_feature, kernel_size=3, padding=1),
            nn.InstanceNorm2d(num_features=out_feature),
            nn.LeakyReLU(inplace=True)
        )

    def forward(self, x):
        return self.double_conv_layer(x)

######################################################################
# FEATURE EXTRACTION FROM ENCODER PART
######################################################################
class EncoderFetureExtraction(nn.Module):
    def __init__(self, feature: int):
        super(EncoderFetureExtraction, self).__init__()

        self.feature_extraction = nn.Sequential(
            nn.Conv2d(in_channels=feature, out_channels=1, kernel_size=1, stride=1),
            nn.InstanceNorm2d(num_features=1),
            nn.LeakyReLU(inplace=True),
            nn.Sigmoid()
        )

        self.relu = nn.LeakyReLU()  # defined but never used in forward

    def forward(self, x):
        x1 = self.feature_extraction(x)
        return x * x1

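# What EncoderFetureExtraction computes, in brief: the 1x1 conv collapses the
# feature map to a single channel, the Sigmoid turns it into a spatial mask in
# (0, 1), and `x * x1` reweights every input channel by that mask -- a simple
# spatial-attention gate applied to the skip connection.
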
######################################################################
# BOTTLENECK LAYER OF THE MODEL
######################################################################
class BottleNeck(nn.Module):
    def __init__(self, in_ch, out_ch):
        super(BottleNeck, self).__init__()
        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=3, padding=1),
            nn.InstanceNorm2d(num_features=out_ch),
            nn.LeakyReLU(inplace=True)
        )

    def forward(self, x):
        return self.bottleneck(x)

######################################################################
# SOFT-ATTENTION IN DECODER LAYER
######################################################################
class AttentionGate(nn.Module):
    def __init__(self, dim_g, dim_x, dim_l):
        super(AttentionGate, self).__init__()
        self.Wg = nn.Sequential(
            nn.Conv2d(in_channels=dim_g, out_channels=dim_l, kernel_size=1, stride=1),
            nn.BatchNorm2d(num_features=dim_l))

        self.Wx = nn.Sequential(
            nn.Conv2d(in_channels=dim_x, out_channels=dim_l, kernel_size=1, stride=1),
            nn.BatchNorm2d(num_features=dim_l))

        self.alpha_conv = nn.Sequential(
            nn.Conv2d(in_channels=dim_l, out_channels=1, kernel_size=1, stride=1),
            nn.BatchNorm2d(num_features=1),
            nn.Sigmoid())

        self.up_conv = nn.ConvTranspose2d(in_channels=dim_g, out_channels=dim_g,
                                          kernel_size=2, stride=2)

        self.relu = nn.ReLU()

    def forward(self, encoder_tensor, decoder_tensor):
        # g has more channels than x; g is the decoder signal, x the encoder skip
        g = self.up_conv(decoder_tensor)   # e.g. [b, 512, 32, 32]
        w_x = self.Wx(encoder_tensor)      # e.g. [b, 128, 32, 32]
        w_g = self.Wg(g)                   # e.g. [b, 128, 32, 32]

        alpha = self.alpha_conv(self.relu(w_x + w_g))

        return encoder_tensor * alpha

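# The gate above is additive soft attention in the style of Attention U-Net:
# both branches are projected to a shared width dim_l, summed, then passed
# through ReLU -> 1x1 conv -> Sigmoid to produce a per-pixel coefficient
# alpha in (0, 1), which scales the encoder skip connection.
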
######################################################################
# IMAGE RECONSTRUCTION FROM PATCH
######################################################################
class ImageFolding(nn.Module):
    def __init__(self, image_size: int, patch_size: int, batch_size: int):
        super(ImageFolding, self).__init__()
        self.num_patches = image_size // patch_size
        self.batch_size = batch_size
        self.folding = nn.Fold(output_size=(image_size, image_size),
                               kernel_size=(patch_size, patch_size),
                               stride=(patch_size, patch_size))

    def forward(self, x):
        x1 = x.view(self.batch_size, self.num_patches * self.num_patches, -1)
        x1 = x1.transpose(1, 2).contiguous()
        x1 = self.folding(x1)
        return x1

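# Note on ImageFolding: with non-overlapping patches (stride == kernel_size),
# nn.Fold is the exact inverse of the nn.Unfold used in ImagePatching, so this
# layer stitches per-patch predictions back into a full image. Because
# batch_size is baked in at construction time, inference must use exactly that
# batch size; a more flexible variant could derive it from x.shape instead.
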
######################################################################
# ENCODER LAYERS
######################################################################
class Encoder(nn.Module):
    def __init__(self, in_channel, out_channel, enc_fet_ch, max_pool_size, is_concate=False):
        super().__init__()
        self.double_conv = DoubleConvLayer(in_feature=in_channel, out_feature=out_channel)
        self.enc_feature_extraction = EncoderFetureExtraction(feature=enc_fet_ch)
        self.pooling_layer = nn.MaxPool2d(kernel_size=max_pool_size, stride=max_pool_size)
        self.concat = is_concate

    def forward(self, x, concat_tensor=None):
        x = self.double_conv(x)
        if self.concat:
            x = torch.cat([concat_tensor, x], dim=1)
        skip_connection = self.enc_feature_extraction(x)
        x = self.pooling_layer(x)
        return x, skip_connection

######################################################################
# DECODER LAYERS
######################################################################
class Decoder(nn.Module):
    def __init__(self, tensor_dim_encoder, tensor_dim_decoder, tensor_dim_mid,
                 up_conv_in_ch, up_conv_out_ch, up_conv_scale,
                 dconv_in_feature, dconv_out_feature, is_concat=False):
        super().__init__()
        self.soft_attention = AttentionGate(dim_g=tensor_dim_decoder, dim_x=tensor_dim_encoder, dim_l=tensor_dim_mid)
        self.up_conv = nn.ConvTranspose2d(in_channels=up_conv_in_ch, out_channels=up_conv_out_ch,
                                          kernel_size=up_conv_scale, stride=up_conv_scale)
        self.double_conv = DoubleConvLayer(in_feature=dconv_in_feature, out_feature=dconv_out_feature)
        self.concat = is_concat

    def forward(self, encoder_tensor, decoder_tensor):
        x = self.soft_attention(encoder_tensor, decoder_tensor)
        y = self.up_conv(decoder_tensor)
        if self.concat:
            x = torch.cat([x, y], dim=1)
        x = self.double_conv(x)
        return x

######################################################################
# FULL SEGMENTATION MODEL
######################################################################
class VesselSegmentModel(PreTrainedModel):
    config_class = VesselSegmentConfig

    def __init__(self, config: VesselSegmentConfig = VesselSegmentConfig()):
        super().__init__(config)
        # image patching
        self.img_patch = ImagePatching(patch_size=config.patch_size)

        # image downsampling
        self.img_down_sampling_1 = ImageDownSampling(height=config.patch_size, width=config.patch_size, scale=2)
        self.img_down_sampling_2 = ImageDownSampling(height=config.patch_size, width=config.patch_size, scale=4)

        # encoder layers
        self.encoder_layer_1 = Encoder(config.input_channels, config.features[0], enc_fet_ch=config.features[0], max_pool_size=2, is_concate=False)
        self.encoder_layer_2 = Encoder(config.input_channels, config.features[1], enc_fet_ch=config.features[0] * 2, max_pool_size=2, is_concate=True)
        self.encoder_layer_3 = Encoder(config.input_channels, config.features[2], enc_fet_ch=config.features[0] * 4, max_pool_size=2, is_concate=True)

        # bottleneck layer
        self.bottleneck = BottleNeck(in_ch=config.features[2] * 2, out_ch=config.features[2] * 4)

        # decoder layers
        self.decoder_layer_1 = Decoder(tensor_dim_decoder=config.features[-1] * 4, tensor_dim_encoder=config.features[-1] * 2,
                                       tensor_dim_mid=config.features[0],
                                       up_conv_in_ch=config.features[-1] * 4, up_conv_out_ch=config.features[-1] * 2, up_conv_scale=2,
                                       dconv_in_feature=config.features[-1] * 4, dconv_out_feature=config.features[-1] * 2, is_concat=True)
        self.decoder_layer_2 = Decoder(tensor_dim_decoder=config.features[-1] * 2, tensor_dim_encoder=config.features[-1],
                                       tensor_dim_mid=config.features[1],
                                       up_conv_in_ch=config.features[-1] * 2, up_conv_out_ch=config.features[-1], up_conv_scale=2,
                                       dconv_in_feature=config.features[-1] * 2, dconv_out_feature=config.features[-1], is_concat=True)
        self.decoder_layer_3 = Decoder(tensor_dim_decoder=config.features[-1], tensor_dim_encoder=config.features[-2],
                                       tensor_dim_mid=config.features[2],
                                       up_conv_in_ch=config.features[-1], up_conv_out_ch=config.features[-2], up_conv_scale=2,
                                       dconv_in_feature=config.features[-1], dconv_out_feature=config.features[-2], is_concat=True)

        # Segmentation Head
        self.segmenation_head = nn.Sequential(
            nn.Conv2d(in_channels=config.features[-3], out_channels=config.num_classes, kernel_size=1, padding=0, stride=1),
            ImageFolding(image_size=config.image_size[0], patch_size=config.patch_size, batch_size=config.batch_size)
        )

    def forward(self, x):
        IMG_1 = self.img_patch(x)
        IMG_2 = self.img_down_sampling_1(IMG_1)
        IMG_3 = self.img_down_sampling_2(IMG_2)

        # encoder
        e1, sk1 = self.encoder_layer_1(IMG_1, None)
        e2, sk2 = self.encoder_layer_2(IMG_2, e1)
        e3, sk3 = self.encoder_layer_3(IMG_3, e2)

        # bottleneck
        b = self.bottleneck(e3)

        # decoder
        d1 = self.decoder_layer_1(sk3, b)
        d2 = self.decoder_layer_2(sk2, d1)
        d3 = self.decoder_layer_3(sk1, d2)

        # head
        head = self.segmenation_head(d3)

        return head
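
######################################################################
# USAGE SKETCH (illustrative, not part of the uploaded file)
######################################################################
# A minimal smoke test, assuming a VesselSegmentConfig that exposes the fields
# the model reads above (patch_size, input_channels, features, image_size,
# batch_size, num_classes). Kept as comments because the config defaults live
# in the companion configuration file, not here:
#
#   config = VesselSegmentConfig()
#   model = VesselSegmentModel(config)
#   x = torch.rand(config.batch_size, config.input_channels,
#                  config.image_size[0], config.image_size[1])
#   with torch.no_grad():
#       mask = model(x)   # (batch_size, num_classes, image_size, image_size)
#
# Note that the batch dimension of x must equal config.batch_size, since
# ImageFolding in the segmentation head reshapes by that fixed value.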