
DAST Code Analysis

DA Part (domain adaptation)

Input image sizes:

      images.size: torch.Size([1, 3, 512, 1024])
      labels.size: torch.Size([1, 512, 1024])

      input_size = (w, h) # input_size : <class 'tuple'>: (1024, 512)

      input_size_target = (w, h) # <class 'tuple'>: (1024, 512)

       

Feature map sizes out of the segmentation network:

      feat_source: ([1, 2048, 65, 129])
      pred_source: ([1, 19, 65, 129])

pred_source = interp(pred_source)  # after upsampling, pred_source becomes: ([1, 19, 512, 1024])
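The walkthrough never shows how interp is built; a minimal sketch, assuming the usual AdaptSegNet-style upsampling layer (the exact call in the DAST repo may differ):

import torch.nn as nn

# input_size = (1024, 512) is (w, h); nn.Upsample expects (h, w)
interp = nn.Upsample(size=(input_size[1], input_size[0]),
                     mode='bilinear', align_corners=True)
interp_target = nn.Upsample(size=(input_size_target[1], input_size_target[0]),
                            mode='bilinear', align_corners=True)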

       




Creating the network:

model = DeeplabMulti(num_classes=args.num_classes)

def DeeplabMulti(num_classes=21):
    model = ResNetMulti(Bottleneck, [3, 4, 23, 2, 1], num_classes)
    return model

       

The segmentation network with attention:

class ResNetMulti(nn.Module):

    def forward(self, x, D, domain):  # source inputs are scored as-is; target inputs are attention-weighted first
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x1 = self.layer1(x)
        x2 = self.layer2(x1)
        x3 = self.layer3(x2)
        x4 = self.layer4(x3)  # ft (target) or fs (source)
        if domain == 'source':  # source: x4.size: torch.Size([1, 2048, 65, 129]), out.size: torch.Size([1, 19, 65, 129])
            x4_a4 = x4
        # target domain: weight the features with the attention map
        if domain == 'target':  # target: x4.size: torch.Size([1, 2048, 65, 129]), out.size: torch.Size([1, 19, 65, 129])
            a4 = D[0](x4)                  # a4 is D(ft) in the paper: the attention map
            a4 = self.tanh(a4)             # tanh acts as a regularizer against gradient explosion early in training
            a4 = torch.abs(a4)             # absolute value: a4 = |D(ft)|
            a4_big = a4.expand(x4.size())  # a': broadcast to the feature dimensions for element-wise multiplication
            x4_a4 = a4_big * x4 + x4       # ft' = ft + ft * a'
        x5 = self.layer5(x4_a4)
        out = self.layer6(x5)
        # print('D[0]', D[0])
        # print('domain:', domain)
        # print('x4.size:', x4.size())    # x4.size: torch.Size([1, 2048, 65, 129])
        # print('out.size:', out.size())  # out.size: torch.Size([1, 19, 65, 129])
        return x4, out
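A quick way to confirm the shapes quoted above; a hypothetical check, assuming model and model_D have been built as in the snippets on this page:

import torch

images = torch.randn(1, 3, 512, 1024)  # dummy Cityscapes-sized input
feat, out = model(images, model_D, 'source')
print(feat.size())  # torch.Size([1, 2048, 65, 129])
print(out.size())   # torch.Size([1, 19, 65, 129])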

       

Discriminators (FCDiscriminator takes 2048 input channels, while OutspaceDiscriminator takes 19):
model_D = nn.ModuleList([FCDiscriminator(num_classes=num_class_list[i]).train().to(device) if i < 1
                         else OutspaceDiscriminator(num_classes=num_class_list[i]).train().to(device)
                         for i in range(2)])

class FCDiscriminator(nn.Module):
    def __init__(self, num_classes, ndf=64):
        # print('num_classes:', num_classes)  # num_classes: 2048
        super(FCDiscriminator, self).__init__()
        self.conv1 = nn.Conv2d(num_classes, num_classes//2, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(num_classes//2, num_classes//4, kernel_size=3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(num_classes//4, num_classes//8, kernel_size=3, stride=1, padding=1)
        self.classifier = nn.Conv2d(num_classes//8, 1, kernel_size=3, stride=1, padding=1)
        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        #self.up_sample = nn.Upsample(scale_factor=32, mode='bilinear')
        #self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = self.conv1(x)
        x = self.leaky_relu(x)
        x = self.conv2(x)
        x = self.leaky_relu(x)
        x = self.conv3(x)
        x = self.leaky_relu(x)
        x = self.classifier(x)
        #x = self.up_sample(x)
        #x = self.sigmoid(x)
        return x

class OutspaceDiscriminator(nn.Module):
    def __init__(self, num_classes, ndf=64):
        super(OutspaceDiscriminator, self).__init__()
        self.conv1 = nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2, padding=1)
        self.conv2 = nn.Conv2d(ndf, ndf*2, kernel_size=4, stride=2, padding=1)
        self.conv3 = nn.Conv2d(ndf*2, ndf*4, kernel_size=4, stride=2, padding=1)
        self.conv4 = nn.Conv2d(ndf*4, ndf*8, kernel_size=4, stride=2, padding=1)
        self.classifier = nn.Conv2d(ndf*8, 1, kernel_size=4, stride=2, padding=1)  # reduce to a single output channel
        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        #self.up_sample = nn.Upsample(scale_factor=32, mode='bilinear')
        #self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = self.conv1(x)
        x = self.leaky_relu(x)
        x = self.conv2(x)
        x = self.leaky_relu(x)
        x = self.conv3(x)
        x = self.leaky_relu(x)
        x = self.conv4(x)
        x = self.leaky_relu(x)
        x = self.classifier(x)
        #x = self.up_sample(x)
        #x = self.sigmoid(x)
        return x
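Note the design difference: FCDiscriminator keeps stride 1 throughout, so its single-channel output preserves the 65×129 spatial size of x4 and can be expanded over it as an attention map, while OutspaceDiscriminator halves the resolution five times. A quick shape check, derived from the layer definitions above:

import torch

fc_d = FCDiscriminator(num_classes=2048)
out_d = OutspaceDiscriminator(num_classes=19)
print(fc_d(torch.randn(1, 2048, 65, 129)).size())   # torch.Size([1, 1, 65, 129])
print(out_d(torch.randn(1, 19, 512, 1024)).size())  # torch.Size([1, 1, 16, 32])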
       
      
      
# D[0](x4):
# tensor([[[[0.0710, 0.1864, 0.2138, ..., 0.2505, 0.1997, 0.1675],
#           [0.0946, 0.2139, 0.2130, ..., 0.2979, 0.2266, 0.1543],
#           [0.1402, 0.2508, 0.2545, ..., 0.3649, 0.3104, 0.1574],
#           ...,
#           [0.1940, 0.3481, 0.3824, ..., 0.3082, 0.2303, 0.1237],
#           [0.1855, 0.2981, 0.3047, ..., 0.2617, 0.1878, 0.0770],
#           [0.0597, 0.1503, 0.1717, ..., 0.1718, 0.1432, 0.0634]]]],
#        device='cuda:0', grad_fn=<AddBackward0>)
      
      
# D[0]:
# FCDiscriminator(
#   (conv1): Conv2d(2048, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
#   (conv2): Conv2d(1024, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
#   (conv3): Conv2d(512, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
#   (classifier): Conv2d(256, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
#   (leaky_relu): LeakyReLU(negative_slope=0.2, inplace=True)
# )

# model_D[1]: OutspaceDiscriminator(
#   (conv1): Conv2d(19, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
#   (conv2): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
#   (conv3): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
#   (conv4): Conv2d(256, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
#   (classifier): Conv2d(512, 1, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
#   (leaky_relu): LeakyReLU(negative_slope=0.2, inplace=True)
# )

####### Start training ######

Train S (the segmentation network)

# train with source
feat_source, pred_source = model(images, model_D, 'source')
# The ResNet returns two feature maps: x4 and out. model_D is the discriminator list; model_D[0]
# is applied to the resnet x4 features to produce the attention map.
# feat_source = x4, the second-to-last layer (2048 output channels); pred_source = out, the last
# layer (19 output channels = number of classes). For a target-domain image, out is computed from
# the attention-weighted features.
pred_source = interp(pred_source)  # upsampled source prediction, used in the segmentation loss
loss_seg = seg_loss(pred_source, labels)
loss_seg.backward()
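seg_loss is defined elsewhere in the script; a minimal stand-in, assuming the usual per-pixel cross-entropy with the Cityscapes void label 255 ignored:

import torch.nn as nn

# assumption: 2D cross-entropy over class logits, ignoring void pixels
seg_loss = nn.CrossEntropyLoss(ignore_index=255)
# pred_source: [1, 19, 512, 1024] logits; labels: [1, 512, 1024] long tensor of class ids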
      
      
# train with target
feat_target, pred_target = model(images, model_D, 'target')
# print('feat_target.size, pred_target.size:', feat_target.size(), pred_target.size())
# feat_target.size, pred_target.size: torch.Size([1, 2048, 65, 129]) torch.Size([1, 19, 65, 129])
pred_target = interp_target(pred_target)
loss_adv = 0
D_out = model_D[0](feat_target)  # score the target-domain features from the second-to-last layer
loss_adv += bce_loss(D_out, torch.FloatTensor(D_out.data.size()).fill_(source_label).to(device))
D_out = model_D[1](F.softmax(pred_target, dim=1))  # turn the last feature map into a probability map, then score it
# print('model_D[1]:', model_D[1])
loss_adv += bce_loss(D_out, torch.FloatTensor(D_out.data.size()).fill_(source_label).to(device))
loss_adv = loss_adv * 0.01
loss_adv.backward()
optimizer.step()
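The target branch fills the BCE target with source_label so that S is pushed to fool the discriminators. bce_loss and the domain labels are defined elsewhere; a minimal sketch, assuming the AdaptSegNet convention (source = 0, target = 1) and raw logits, since the discriminators' sigmoid is commented out:

import torch.nn as nn

# assumption: AdaptSegNet-style adversarial setup
bce_loss = nn.BCEWithLogitsLoss()  # discriminators output logits, so sigmoid is folded into the loss
source_label = 0
target_label = 1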
      
      

Train D (the discriminators)

      
      
# train with source
loss_D_source = 0
D_out_source = model_D[0](feat_source.detach())
loss_D_source += bce_loss(D_out_source,
                          torch.FloatTensor(D_out_source.data.size()).fill_(source_label).to(device))
D_out_source = model_D[1](F.softmax(pred_source.detach(), dim=1))
loss_D_source += bce_loss(D_out_source,
                          torch.FloatTensor(D_out_source.data.size()).fill_(source_label).to(device))
loss_D_source.backward()

# train with target
loss_D_target = 0
D_out_target = model_D[0](feat_target.detach())
loss_D_target += bce_loss(D_out_target,
                          torch.FloatTensor(D_out_target.data.size()).fill_(target_label).to(device))
D_out_target = model_D[1](F.softmax(pred_target.detach(), dim=1))
loss_D_target += bce_loss(D_out_target,
                          torch.FloatTensor(D_out_target.data.size()).fill_(target_label).to(device))
loss_D_target.backward()
optimizer_D.step()
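One detail the snippets rely on but do not show: while S is being updated, the discriminator weights should be frozen, and unfrozen again for the D step. A hedged sketch of the usual toggle (an assumption based on the AdaptSegNet training pattern; the DAST script may handle this differently):

# freeze D while updating S
for param in model_D.parameters():
    param.requires_grad = False
# ... run the "Train S" steps above, then unfreeze D:
for param in model_D.parameters():
    param.requires_grad = True
# ... run the "Train D" steps; .detach() on feat/pred keeps D's losses from reaching S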

       

ST Part (self-training)

       




      images.size: torch.Size([1, 3, 512, 1024])