diff --git a/.gitattributes b/.gitattributes
deleted file mode 100644
index dfe0770..0000000
--- a/.gitattributes
+++ /dev/null
@@ -1,2 +0,0 @@
-# Auto detect text files and perform LF normalization
-* text=auto
diff --git a/README.md b/README.md
deleted file mode 100644
index 67cfe8d..0000000
--- a/README.md
+++ /dev/null
@@ -1,65 +0,0 @@
-# Hair Color Changing:
-
-## Original Pictures:
-
-
-
----------------------------------------------------------------------
-## Changed Pictures:
-
-
-
-
-## Requirements:
- pip install torch
- pip install numpy
- pip install Pillow
- pip install opencv-python
- pip install torchvision
-## run.py
-    1. Edit the last line of run.py as needed.
-        For example : evaluate(input_path='files/1.jpg', output_path='files/1_gold.jpg', mode='gold')
-        input_path : path of the input image
-        output_path : path where the output image is saved
-        mode : hair color to apply (there are 3 options : black, gold, red)
- 2. Run run.py
- python run.py
-## How does it work?
-    This code performs facial image parsing: it divides an input face image into regions according to the features present in the image (skin, hair, and so on) and then recolors the hair.
-    The code starts by importing the necessary libraries: PyTorch, NumPy, PIL (Python Imaging Library) and OpenCV.
-
-    The similar function decides whether two colors are similar by comparing their Green (G), Blue (B) and Red (R) values.
-    For each channel of the second color that is bright enough (value above 30), it appends the per-channel ratio between the first and the second color to an array ar.
-    If the length of this array is less than 1, no ratio could be computed and the function returns False.
-    It also returns False if the minimum value in the array is zero, since the ratio test below would otherwise divide by zero.
-    Finally, it calculates the brightness ratio (br) between the brightest channel of the first color and the brightest channel of the second,
-    and returns True only if the maximum ratio in the array divided by the minimum ratio is less than 1.55
-    and the brightness ratio lies between 0.7 and 1.4, indicating that the colors are similar.
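-
-    A condensed sketch of that check (a restatement of the similar function in run.py, not a separate implementation):
-
-        def similar(G1, B1, R1, G2, B2, R2):
-            ar = []
-            for c1, c2 in ((G1, G2), (B1, B2), (R1, R2)):
-                if c2 > 30:                              # skip channels of the second color that are too dark
-                    ar.append(1000. * c1 / c2)           # per-channel ratio (the 1000 factor cancels in the max/min test)
-            if len(ar) < 1 or min(ar) == 0:
-                return False
-            br = max(R1, G1, B1) / max(G2, B2, R2)       # brightness ratio of the two colors
-            return max(ar) / min(ar) < 1.55 and 0.7 < br < 1.4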
-
-    The CFAR function decides whether a pixel inside the segmented hair region really is hair, based on its color, the hair brightness and the hair proportion.
-    The function takes eight arguments: G, B and R, the average hair color values; g, b and r, the color values of the pixel under test;
-    pro, the proportion of hair pixels within the parsed face area; and bri, the average hair brightness.
-    It first creates an empty array ar and appends the ratio between each color component (G/g, B/b and R/r) to this array,
-    but only for channels whose pixel value g, b or r is above a threshold of 30. If the length of this array is equal to 0,
-    no ratio could be computed and the function returns True, treating the pixel as hair.
-    Otherwise it compares the spread of the ratios (maximum divided by minimum) against a limit that depends on the brightness bri and the hair proportion pro, and returns True or False accordingly.
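-
-    The same test, condensed (mirroring the CFAR function in run.py):
-
-        def CFAR(G, B, R, g, b, r, pro, bri):
-            ar = []
-            for avg, cur in ((G, g), (B, b), (R, r)):
-                if cur > 30:                             # only compare channels that are bright enough
-                    ar.append(avg / cur)
-            if len(ar) == 0:
-                return True                              # very dark pixel: keep it as hair
-            spread = max(ar) / min(ar)
-            if bri > 120:
-                return spread < 2
-            if bri < 70:
-                return spread < 1.7
-            if pro < 0.35:
-                return spread < 1.6 and max(ar) > 0.8
-            return spread < 1.7 and max(ar) > 0.65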
-
-    The vis_parsing_maps function takes the resized input image, the original image, the parsing annotation and the stride as arguments and performs the hair recoloring.
-    vis_parsing_anno holds the per-pixel class predicted by the face segmentation model: for example, vis_parsing_anno[_x][_y] = 1 marks a face (skin) pixel
-    and vis_parsing_anno[_x][_y] = 17 marks a hair pixel. This segmentation is not exact, though:
-    the hair class covers all of the hair but also bleeds into neighbouring areas such as the skin around the scalp and hairline.
-    So hair detection still has to be applied inside the hair class, and the CFAR function is used to keep only the pixels that really look like hair.
-    Finally, the function changes the color of the detected hair pixels and saves the output image.
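-
-    The recoloring itself scales the target color for the chosen mode (GB, GG, GR) by a factor p derived from the pixel's own brightness and the average hair brightness brigh.
-    A small helper showing that scaling (run.py performs these operations inline; the name scaled_target is only for illustration):
-
-        def scaled_target(cur_b, cur_g, cur_r, brigh, GB, GG, GR):
-            s = int(cur_b) + int(cur_g) + int(cur_r)     # brightness of this hair pixel
-            if brigh > 120:
-                p = (s + 20) * (s + 20) / (brigh + 20) / (brigh + 20) / 20
-            elif brigh < 80:
-                p = s * 70 / 520 / brigh
-            else:
-                p = s / 520
-            return (min(255, int(GB * p)),               # new B, G, R values
-                    min(255, int(GG * p)),
-                    min(255, int(GR * p)))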
-
-    The evaluate function loads the pre-trained BiSeNet model from a checkpoint file, passes the input image through it, and hands the resulting parsing map to vis_parsing_maps.
-    The recolored image is then saved to the specified output path. The mode argument selects the color applied to the hair: gold, red or black.
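-
-    For example, to produce all three color variants of one of the sample photos (run.py guards its own demo call with if __name__ == "__main__", so evaluate can be imported safely):
-
-        from run import evaluate
-
-        evaluate(input_path='files/2.JPG', output_path='files/2_black.jpg', mode='black')
-        evaluate(input_path='files/2.JPG', output_path='files/2_gold.jpg', mode='gold')
-        evaluate(input_path='files/2.JPG', output_path='files/2_red.jpg', mode='red')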
diff --git a/files/1.jpg b/files/1.jpg
deleted file mode 100644
index f54dfe0..0000000
Binary files a/files/1.jpg and /dev/null differ
diff --git a/files/1_gold.jpg b/files/1_gold.jpg
deleted file mode 100644
index 6d360d9..0000000
Binary files a/files/1_gold.jpg and /dev/null differ
diff --git a/files/2.JPG b/files/2.JPG
deleted file mode 100644
index b563edc..0000000
Binary files a/files/2.JPG and /dev/null differ
diff --git a/files/2_black.jpg b/files/2_black.jpg
deleted file mode 100644
index 4ee5d9d..0000000
Binary files a/files/2_black.jpg and /dev/null differ
diff --git a/files/2_gold.jpg b/files/2_gold.jpg
deleted file mode 100644
index 85d9bc2..0000000
Binary files a/files/2_gold.jpg and /dev/null differ
diff --git a/files/3.JPG b/files/3.JPG
deleted file mode 100644
index ac0affa..0000000
Binary files a/files/3.JPG and /dev/null differ
diff --git a/files/3_black.jpg b/files/3_black.jpg
deleted file mode 100644
index 3550a1c..0000000
Binary files a/files/3_black.jpg and /dev/null differ
diff --git a/files/3_gold.jpg b/files/3_gold.jpg
deleted file mode 100644
index 5e402f2..0000000
Binary files a/files/3_gold.jpg and /dev/null differ
diff --git a/files/4.jpg b/files/4.jpg
deleted file mode 100644
index e58be13..0000000
Binary files a/files/4.jpg and /dev/null differ
diff --git a/files/4_black.jpg b/files/4_black.jpg
deleted file mode 100644
index 951647c..0000000
Binary files a/files/4_black.jpg and /dev/null differ
diff --git a/files/4_gold.jpg b/files/4_gold.jpg
deleted file mode 100644
index 5e22f98..0000000
Binary files a/files/4_gold.jpg and /dev/null differ
diff --git a/files/5.jpg b/files/5.jpg
deleted file mode 100644
index 0ec33f8..0000000
Binary files a/files/5.jpg and /dev/null differ
diff --git a/files/5_gold.jpg b/files/5_gold.jpg
deleted file mode 100644
index 0500235..0000000
Binary files a/files/5_gold.jpg and /dev/null differ
diff --git a/files/5_red.jpg b/files/5_red.jpg
deleted file mode 100644
index f57ca45..0000000
Binary files a/files/5_red.jpg and /dev/null differ
diff --git a/files/6.jpg b/files/6.jpg
deleted file mode 100644
index b210560..0000000
Binary files a/files/6.jpg and /dev/null differ
diff --git a/files/6_black.jpg b/files/6_black.jpg
deleted file mode 100644
index 0802c29..0000000
Binary files a/files/6_black.jpg and /dev/null differ
diff --git a/files/6_gold.jpg b/files/6_gold.jpg
deleted file mode 100644
index 72e41f5..0000000
Binary files a/files/6_gold.jpg and /dev/null differ
diff --git a/model.py b/model.py
deleted file mode 100644
index 040f41f..0000000
--- a/model.py
+++ /dev/null
@@ -1,283 +0,0 @@
-#!/usr/bin/python
-# -*- encoding: utf-8 -*-
-
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torchvision
-
-from resnet import Resnet18
-# from modules.bn import InPlaceABNSync as BatchNorm2d
-
-
-class ConvBNReLU(nn.Module):
- def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1, *args, **kwargs):
- super(ConvBNReLU, self).__init__()
- self.conv = nn.Conv2d(in_chan,
- out_chan,
- kernel_size = ks,
- stride = stride,
- padding = padding,
- bias = False)
- self.bn = nn.BatchNorm2d(out_chan)
- self.init_weight()
-
- def forward(self, x):
- x = self.conv(x)
- x = F.relu(self.bn(x))
- return x
-
- def init_weight(self):
- for ly in self.children():
- if isinstance(ly, nn.Conv2d):
- nn.init.kaiming_normal_(ly.weight, a=1)
- if not ly.bias is None: nn.init.constant_(ly.bias, 0)
-
-class BiSeNetOutput(nn.Module):
- def __init__(self, in_chan, mid_chan, n_classes, *args, **kwargs):
- super(BiSeNetOutput, self).__init__()
- self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1)
- self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias=False)
- self.init_weight()
-
- def forward(self, x):
- x = self.conv(x)
- x = self.conv_out(x)
- return x
-
- def init_weight(self):
- for ly in self.children():
- if isinstance(ly, nn.Conv2d):
- nn.init.kaiming_normal_(ly.weight, a=1)
- if not ly.bias is None: nn.init.constant_(ly.bias, 0)
-
- def get_params(self):
- wd_params, nowd_params = [], []
- for name, module in self.named_modules():
- if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
- wd_params.append(module.weight)
- if not module.bias is None:
- nowd_params.append(module.bias)
- elif isinstance(module, nn.BatchNorm2d):
- nowd_params += list(module.parameters())
- return wd_params, nowd_params
-
-
-class AttentionRefinementModule(nn.Module):
- def __init__(self, in_chan, out_chan, *args, **kwargs):
- super(AttentionRefinementModule, self).__init__()
- self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1)
- self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size= 1, bias=False)
- self.bn_atten = nn.BatchNorm2d(out_chan)
- self.sigmoid_atten = nn.Sigmoid()
- self.init_weight()
-
- def forward(self, x):
- feat = self.conv(x)
- atten = F.avg_pool2d(feat, feat.size()[2:])
- atten = self.conv_atten(atten)
- atten = self.bn_atten(atten)
- atten = self.sigmoid_atten(atten)
- out = torch.mul(feat, atten)
- return out
-
- def init_weight(self):
- for ly in self.children():
- if isinstance(ly, nn.Conv2d):
- nn.init.kaiming_normal_(ly.weight, a=1)
- if not ly.bias is None: nn.init.constant_(ly.bias, 0)
-
-
-class ContextPath(nn.Module):
- def __init__(self, *args, **kwargs):
- super(ContextPath, self).__init__()
- self.resnet = Resnet18()
- self.arm16 = AttentionRefinementModule(256, 128)
- self.arm32 = AttentionRefinementModule(512, 128)
- self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
- self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
- self.conv_avg = ConvBNReLU(512, 128, ks=1, stride=1, padding=0)
-
- self.init_weight()
-
- def forward(self, x):
- H0, W0 = x.size()[2:]
- feat8, feat16, feat32 = self.resnet(x)
- H8, W8 = feat8.size()[2:]
- H16, W16 = feat16.size()[2:]
- H32, W32 = feat32.size()[2:]
-
- avg = F.avg_pool2d(feat32, feat32.size()[2:])
- avg = self.conv_avg(avg)
- avg_up = F.interpolate(avg, (H32, W32), mode='nearest')
-
- feat32_arm = self.arm32(feat32)
- feat32_sum = feat32_arm + avg_up
- feat32_up = F.interpolate(feat32_sum, (H16, W16), mode='nearest')
- feat32_up = self.conv_head32(feat32_up)
-
- feat16_arm = self.arm16(feat16)
- feat16_sum = feat16_arm + feat32_up
- feat16_up = F.interpolate(feat16_sum, (H8, W8), mode='nearest')
- feat16_up = self.conv_head16(feat16_up)
-
- return feat8, feat16_up, feat32_up # x8, x8, x16
-
- def init_weight(self):
- for ly in self.children():
- if isinstance(ly, nn.Conv2d):
- nn.init.kaiming_normal_(ly.weight, a=1)
- if not ly.bias is None: nn.init.constant_(ly.bias, 0)
-
- def get_params(self):
- wd_params, nowd_params = [], []
- for name, module in self.named_modules():
- if isinstance(module, (nn.Linear, nn.Conv2d)):
- wd_params.append(module.weight)
- if not module.bias is None:
- nowd_params.append(module.bias)
- elif isinstance(module, nn.BatchNorm2d):
- nowd_params += list(module.parameters())
- return wd_params, nowd_params
-
-
-### This is not used, since I replace this with the resnet feature with the same size
-class SpatialPath(nn.Module):
- def __init__(self, *args, **kwargs):
- super(SpatialPath, self).__init__()
- self.conv1 = ConvBNReLU(3, 64, ks=7, stride=2, padding=3)
- self.conv2 = ConvBNReLU(64, 64, ks=3, stride=2, padding=1)
- self.conv3 = ConvBNReLU(64, 64, ks=3, stride=2, padding=1)
- self.conv_out = ConvBNReLU(64, 128, ks=1, stride=1, padding=0)
- self.init_weight()
-
- def forward(self, x):
- feat = self.conv1(x)
- feat = self.conv2(feat)
- feat = self.conv3(feat)
- feat = self.conv_out(feat)
- return feat
-
- def init_weight(self):
- for ly in self.children():
- if isinstance(ly, nn.Conv2d):
- nn.init.kaiming_normal_(ly.weight, a=1)
- if not ly.bias is None: nn.init.constant_(ly.bias, 0)
-
- def get_params(self):
- wd_params, nowd_params = [], []
- for name, module in self.named_modules():
- if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
- wd_params.append(module.weight)
- if not module.bias is None:
- nowd_params.append(module.bias)
- elif isinstance(module, nn.BatchNorm2d):
- nowd_params += list(module.parameters())
- return wd_params, nowd_params
-
-
-class FeatureFusionModule(nn.Module):
- def __init__(self, in_chan, out_chan, *args, **kwargs):
- super(FeatureFusionModule, self).__init__()
- self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0)
- self.conv1 = nn.Conv2d(out_chan,
- out_chan//4,
- kernel_size = 1,
- stride = 1,
- padding = 0,
- bias = False)
- self.conv2 = nn.Conv2d(out_chan//4,
- out_chan,
- kernel_size = 1,
- stride = 1,
- padding = 0,
- bias = False)
- self.relu = nn.ReLU(inplace=True)
- self.sigmoid = nn.Sigmoid()
- self.init_weight()
-
- def forward(self, fsp, fcp):
- fcat = torch.cat([fsp, fcp], dim=1)
- feat = self.convblk(fcat)
- atten = F.avg_pool2d(feat, feat.size()[2:])
- atten = self.conv1(atten)
- atten = self.relu(atten)
- atten = self.conv2(atten)
- atten = self.sigmoid(atten)
- feat_atten = torch.mul(feat, atten)
- feat_out = feat_atten + feat
- return feat_out
-
- def init_weight(self):
- for ly in self.children():
- if isinstance(ly, nn.Conv2d):
- nn.init.kaiming_normal_(ly.weight, a=1)
- if not ly.bias is None: nn.init.constant_(ly.bias, 0)
-
- def get_params(self):
- wd_params, nowd_params = [], []
- for name, module in self.named_modules():
- if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
- wd_params.append(module.weight)
- if not module.bias is None:
- nowd_params.append(module.bias)
- elif isinstance(module, nn.BatchNorm2d):
- nowd_params += list(module.parameters())
- return wd_params, nowd_params
-
-
-class BiSeNet(nn.Module):
- def __init__(self, n_classes, *args, **kwargs):
- super(BiSeNet, self).__init__()
- self.cp = ContextPath()
- ## here self.sp is deleted
- self.ffm = FeatureFusionModule(256, 256)
- self.conv_out = BiSeNetOutput(256, 256, n_classes)
- self.conv_out16 = BiSeNetOutput(128, 64, n_classes)
- self.conv_out32 = BiSeNetOutput(128, 64, n_classes)
- self.init_weight()
-
- def forward(self, x):
- H, W = x.size()[2:]
- feat_res8, feat_cp8, feat_cp16 = self.cp(x) # here return res3b1 feature
- feat_sp = feat_res8 # use res3b1 feature to replace spatial path feature
- feat_fuse = self.ffm(feat_sp, feat_cp8)
-
- feat_out = self.conv_out(feat_fuse)
- feat_out16 = self.conv_out16(feat_cp8)
- feat_out32 = self.conv_out32(feat_cp16)
-
- feat_out = F.interpolate(feat_out, (H, W), mode='bilinear', align_corners=True)
- feat_out16 = F.interpolate(feat_out16, (H, W), mode='bilinear', align_corners=True)
- feat_out32 = F.interpolate(feat_out32, (H, W), mode='bilinear', align_corners=True)
- return feat_out, feat_out16, feat_out32
-
- def init_weight(self):
- for ly in self.children():
- if isinstance(ly, nn.Conv2d):
- nn.init.kaiming_normal_(ly.weight, a=1)
- if not ly.bias is None: nn.init.constant_(ly.bias, 0)
-
- def get_params(self):
- wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params = [], [], [], []
- for name, child in self.named_children():
- child_wd_params, child_nowd_params = child.get_params()
- if isinstance(child, FeatureFusionModule) or isinstance(child, BiSeNetOutput):
- lr_mul_wd_params += child_wd_params
- lr_mul_nowd_params += child_nowd_params
- else:
- wd_params += child_wd_params
- nowd_params += child_nowd_params
- return wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params
-
-
-if __name__ == "__main__":
- net = BiSeNet(19)
- net.cuda()
- net.eval()
- in_ten = torch.randn(16, 3, 640, 480).cuda()
- out, out16, out32 = net(in_ten)
- print(out.shape)
-
- net.get_params()
diff --git a/model/model.pth b/model/model.pth
deleted file mode 100644
index a125015..0000000
Binary files a/model/model.pth and /dev/null differ
diff --git a/resnet.py b/resnet.py
deleted file mode 100644
index aa2bf95..0000000
--- a/resnet.py
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/usr/bin/python
-# -*- encoding: utf-8 -*-
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.utils.model_zoo as modelzoo
-
-# from modules.bn import InPlaceABNSync as BatchNorm2d
-
-resnet18_url = 'https://download.pytorch.org/models/resnet18-5c106cde.pth'
-
-
-def conv3x3(in_planes, out_planes, stride=1):
- """3x3 convolution with padding"""
- return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
- padding=1, bias=False)
-
-
-class BasicBlock(nn.Module):
- def __init__(self, in_chan, out_chan, stride=1):
- super(BasicBlock, self).__init__()
- self.conv1 = conv3x3(in_chan, out_chan, stride)
- self.bn1 = nn.BatchNorm2d(out_chan)
- self.conv2 = conv3x3(out_chan, out_chan)
- self.bn2 = nn.BatchNorm2d(out_chan)
- self.relu = nn.ReLU(inplace=True)
- self.downsample = None
- if in_chan != out_chan or stride != 1:
- self.downsample = nn.Sequential(
- nn.Conv2d(in_chan, out_chan,
- kernel_size=1, stride=stride, bias=False),
- nn.BatchNorm2d(out_chan),
- )
-
- def forward(self, x):
- residual = self.conv1(x)
- residual = F.relu(self.bn1(residual))
- residual = self.conv2(residual)
- residual = self.bn2(residual)
-
- shortcut = x
- if self.downsample is not None:
- shortcut = self.downsample(x)
-
- out = shortcut + residual
- out = self.relu(out)
- return out
-
-
-def create_layer_basic(in_chan, out_chan, bnum, stride=1):
- layers = [BasicBlock(in_chan, out_chan, stride=stride)]
- for i in range(bnum-1):
- layers.append(BasicBlock(out_chan, out_chan, stride=1))
- return nn.Sequential(*layers)
-
-
-class Resnet18(nn.Module):
- def __init__(self):
- super(Resnet18, self).__init__()
- self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
- bias=False)
- self.bn1 = nn.BatchNorm2d(64)
- self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
- self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1)
- self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2)
- self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2)
- self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2)
- self.init_weight()
-
- def forward(self, x):
- x = self.conv1(x)
- x = F.relu(self.bn1(x))
- x = self.maxpool(x)
-
- x = self.layer1(x)
- feat8 = self.layer2(x) # 1/8
- feat16 = self.layer3(feat8) # 1/16
- feat32 = self.layer4(feat16) # 1/32
- return feat8, feat16, feat32
-
- def init_weight(self):
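-        # Load ImageNet-pretrained ResNet-18 weights from the torchvision model zoo, copying everything except the final fc layer.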
- state_dict = modelzoo.load_url(resnet18_url)
- self_state_dict = self.state_dict()
- for k, v in state_dict.items():
- if 'fc' in k: continue
- self_state_dict.update({k: v})
- self.load_state_dict(self_state_dict)
-
- def get_params(self):
- wd_params, nowd_params = [], []
- for name, module in self.named_modules():
- if isinstance(module, (nn.Linear, nn.Conv2d)):
- wd_params.append(module.weight)
- if not module.bias is None:
- nowd_params.append(module.bias)
- elif isinstance(module, nn.BatchNorm2d):
- nowd_params += list(module.parameters())
- return wd_params, nowd_params
-
-
-if __name__ == "__main__":
- net = Resnet18()
- x = torch.randn(16, 3, 224, 224)
- out = net(x)
- print(out[0].size())
- print(out[1].size())
- print(out[2].size())
- net.get_params()
diff --git a/run.py b/run.py
deleted file mode 100644
index 44f48e8..0000000
--- a/run.py
+++ /dev/null
@@ -1,168 +0,0 @@
-from model import BiSeNet
-import torch
-import os.path as osp
-import numpy as np
-from PIL import Image
-import torchvision.transforms as transforms
-import cv2
-
-def similar(G1,B1,R1,G2,B2,R2):
- ar=[]
- if G2 > 30:
- ar.append(1000.*G1/G2)
- if B2 > 30:
- ar.append(1000.*B1/B2)
- if R2 > 30:
- ar.append(1000.*R1/R2)
- if len(ar) < 1:
- return False
- if min(ar) == 0:
- return False
- br = max(R1,G1,B1) / max(G2,B2,R2)
- return max(ar) / min(ar) < 1.55 and br > 0.7 and br < 1.4
-
-def CFAR(G,B,R,g,b,r,pro,bri):
- ar=[]
- if g > 30:
- ar.append(G/g)
- if b > 30:
- ar.append(B/b)
- if r > 30:
- ar.append(R/r)
- if len(ar) == 0:
- return True
- if bri > 120 :
- return max(ar)/min(ar) < 2
- if bri < 70 :
- return max(ar)/min(ar) < 1.7
- if pro < 0.35 :
- return max(ar)/min(ar) < 1.6 and max(ar) > 0.8
- else :
- return max(ar)/min(ar) < 1.7 and max(ar) > 0.65
-
-def vis_parsing_maps(im, origin, parsing_anno, stride, save_im=False, save_path='output.jpg', mod='gold'):
-
- im = np.array(im)
- vis_im = im.copy().astype(np.uint8)
- vis_parsing_anno = parsing_anno.copy().astype(np.uint8)
- vis_parsing_anno = cv2.resize(vis_parsing_anno, None, fx=stride, fy=stride, interpolation=cv2.INTER_NEAREST)
-
- num_of_class = np.max(vis_parsing_anno)
-
- SB = 0
- SR = 0
- SG = 0
- cnt = 0
- total = 0
- brigh = 0
- FB = 0
- FR = 0
- FG = 0
- FN = 0
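-    # First pass: average color of the face (label 1) pixels; FB, FG, FR are used as a skin-color reference.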
- for x in range(0, origin.shape[0]):
- for y in range(0, origin.shape[1]):
- _x = int(x * 512 / origin.shape[0])
- _y = int(y * 512 / origin.shape[1])
- if vis_parsing_anno[_x][_y] == 1:
- FB = FB + int(origin[x][y][0])
- FG = FG + int(origin[x][y][1])
- FR = FR + int(origin[x][y][2])
- FN = FN + 1
- FB = int(FB / FN)
- FR = int(FR / FN)
- FG = int(FG / FN)
-
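-    # Second pass: average hair color (SB, SG, SR), hair proportion and brightness over label-17 pixels, skipping pixels whose color is similar to the skin reference.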
- for x in range(0, origin.shape[0]):
- for y in range(0, origin.shape[1]):
- _x = int(x * 512 / origin.shape[0])
- _y = int(y * 512 / origin.shape[1])
- if vis_parsing_anno[_x][_y] == 17:
- OB = int(origin[x][y][0])
- OG = int(origin[x][y][1])
- OR = int(origin[x][y][2])
- if similar(OB,OG,OR,FB,FG,FR) :
- continue
- SB = SB + OB
- SG = SG + OG
- SR = SR + OR
- cnt = cnt + 1
-                    brigh = brigh + OB + OG + OR
- if vis_parsing_anno[_x][_y] <= 17:
- total = total + 1
- pro = cnt / total
- SB = int(SB / cnt)
- SG = int(SG / cnt)
- SR = int(SR / cnt)
- brigh = brigh / cnt / 3
-
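-    # Third pass: recolor the remaining hair pixels; the target color for the chosen mode is scaled by each pixel's brightness before being written back.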
- for x in range(0, origin.shape[0]):
- for y in range(0, origin.shape[1]):
- _x = int(x * 512 / origin.shape[0])
- _y = int(y * 512 / origin.shape[1])
- if vis_parsing_anno[_x][_y] == 17:
- OB = int(origin[x][y][0])
- OG = int(origin[x][y][1])
- OR = int(origin[x][y][2])
- if similar(OB,OG,OR,FB,FG,FR) :
- continue
- cur = origin[x][y]
- sum = int(cur[0]) + int(cur[1]) + int(cur[2])
- if mod=='gold':
- GB = 0
- GG = 215 * 0.8
- GR = 255 * 0.8
- if mod=='red':
- GB = 50
- GG = 80
- GR = 255
- if mod=='black':
- GB = 100
- GG = 110
- GR = 125
-
- if brigh > 120 :
- param = 20
- p = (sum + param) * (sum + param) / (brigh + param) / (brigh + param) / 20
- elif brigh < 80 :
- p = sum * 70 / 520 / brigh
- else :
- p = sum / 520
- if CFAR(SB,SG,SR,cur[0],cur[1],cur[2],pro,brigh):
- cur[0] = min(255, int(GB * p))
- cur[1] = min(255, int(GG * p))
- cur[2] = min(255, int(GR * p))
-
- if save_im:
- cv2.imwrite(save_path, origin, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
-
-def evaluate(cp='model/model.pth', input_path='4.jpg', output_path='output.jpg', mode='gold'):
-
- n_classes = 19
- net = BiSeNet(n_classes=n_classes)
- net.cpu()
- save_pth = osp.join('', cp)
- net.load_state_dict(torch.load(save_pth, map_location=torch.device('cpu')))
- net.eval()
-
- to_tensor = transforms.Compose([
- transforms.ToTensor(),
- transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
- ])
- with torch.no_grad():
- img = Image.open(input_path)
- origin = cv2.imread(input_path, cv2.IMREAD_UNCHANGED)
- image = img.resize((512,512))
- # image = img.resize((512, 512), Image.ANTIALIAS)
- # image = img.resize((512, 512), Image.NEAREST)
- # image = img.resize((512, 512), Image.LANCZOS)
- # image = img.resize((512, 512), Image.BILINEAR)
- img = to_tensor(image)
- img = torch.unsqueeze(img, 0)
- img = img.cpu()
- out = net(img)[0]
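-        # out has shape (1, 19, 512, 512); argmax over the class axis yields a 512x512 label map.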
- parsing = out.squeeze(0).cpu().numpy().argmax(0)
- vis_parsing_maps(image, origin, parsing, stride=1, save_im=True, save_path=output_path, mod=mode)
-
-
-if __name__ == "__main__":
-    evaluate(input_path='files/4.jpg', output_path='files/4_gold.jpg', mode='gold')