diff --git a/gfpgan/archs/gfpganv1_arch.py b/gfpgan/archs/gfpganv1_arch.py
index eaf3162..34ae5a2 100644
--- a/gfpgan/archs/gfpganv1_arch.py
+++ b/gfpgan/archs/gfpganv1_arch.py
@@ -3,7 +3,6 @@ import random
 import torch
 from basicsr.archs.stylegan2_arch import (ConvLayer, EqualConv2d, EqualLinear, ResBlock, ScaledLeakyReLU,
                                           StyleGAN2Generator)
-from basicsr.ops.fused_act import FusedLeakyReLU
 from basicsr.utils.registry import ARCH_REGISTRY
 from torch import nn
 from torch.nn import functional as F
@@ -170,10 +169,7 @@ class ConvUpLayer(nn.Module):
 
         # activation
         if activate:
-            if bias:
-                self.activation = FusedLeakyReLU(out_channels)
-            else:
-                self.activation = ScaledLeakyReLU(0.2)
+            self.activation = ScaledLeakyReLU(0.2)
         else:
             self.activation = None
 
diff --git a/gfpgan/archs/stylegan2_bilinear_arch.py b/gfpgan/archs/stylegan2_bilinear_arch.py
index 1342ee3..5cffb44 100644
--- a/gfpgan/archs/stylegan2_bilinear_arch.py
+++ b/gfpgan/archs/stylegan2_bilinear_arch.py
@@ -1,7 +1,6 @@
 import math
 import random
 import torch
-from basicsr.ops.fused_act import FusedLeakyReLU, fused_leaky_relu
 from basicsr.utils.registry import ARCH_REGISTRY
 from torch import nn
 from torch.nn import functional as F
@@ -190,7 +189,7 @@ class StyleConv(nn.Module):
             sample_mode=sample_mode,
             interpolation_mode=interpolation_mode)
         self.weight = nn.Parameter(torch.zeros(1))  # for noise injection
-        self.activate = FusedLeakyReLU(out_channels)
+        self.activate = ScaledLeakyReLU()
 
     def forward(self, x, style, noise=None):
         # modulate
@@ -568,10 +567,7 @@ class ConvLayer(nn.Sequential):
                 bias=bias and not activate))
 
         # activation
         if activate:
-            if bias:
-                layers.append(FusedLeakyReLU(out_channels))
-            else:
-                layers.append(ScaledLeakyReLU(0.2))
+            layers.append(ScaledLeakyReLU(0.2))
 
         super(ConvLayer, self).__init__(*layers)
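Note: the removed FusedLeakyReLU (basicsr.ops.fused_act) is a leaky ReLU with a learnable per-channel bias and a sqrt(2) output scale, fused into a single CUDA kernel. basicsr's ScaledLeakyReLU applies the same 0.2-slope leaky ReLU and sqrt(2) scale but has no bias parameter, so this patch trades the compiled extension for a small behavioral difference: checkpoints trained with the fused op carry activation bias weights that the patched modules no longer have. A minimal pure-PyTorch sketch of the removed op's arithmetic (the class name FusedLeakyReLUReference is illustrative, not part of basicsr; NCHW input assumed):

import math
import torch
from torch import nn
from torch.nn import functional as F

class FusedLeakyReLUReference(nn.Module):
    """Pure-PyTorch equivalent of basicsr.ops.fused_act.FusedLeakyReLU."""

    def __init__(self, channel, negative_slope=0.2, scale=math.sqrt(2)):
        super().__init__()
        # Learnable per-channel bias -- this is what ScaledLeakyReLU drops.
        self.bias = nn.Parameter(torch.zeros(channel))
        self.negative_slope = negative_slope
        self.scale = scale

    def forward(self, x):
        # Add the bias broadcast over (N, C, H, W), apply leaky ReLU, then scale.
        out = x + self.bias.view(1, -1, 1, 1)
        return F.leaky_relu(out, negative_slope=self.negative_slope) * self.scale

Swapping such a reference class in (instead of ScaledLeakyReLU) would keep the pretrained bias weights loadable, since the parameter is still named `bias`, while still avoiding the compiled CUDA extension.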