Signed-off-by: NYH <175484793@qq.com>
|
|
@ -0,0 +1,81 @@
|
|||
<component name="InspectionProjectProfileManager">
|
||||
<profile version="1.0">
|
||||
<option name="myName" value="Project Default" />
|
||||
<inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
|
||||
<option name="ignoredPackages">
|
||||
<value>
|
||||
<list size="68">
|
||||
<item index="0" class="java.lang.String" itemvalue="thop" />
|
||||
<item index="1" class="java.lang.String" itemvalue="Cython" />
|
||||
<item index="2" class="java.lang.String" itemvalue="pycocotools" />
|
||||
<item index="3" class="java.lang.String" itemvalue="pandas" />
|
||||
<item index="4" class="java.lang.String" itemvalue="tqdm" />
|
||||
<item index="5" class="java.lang.String" itemvalue="scipy" />
|
||||
<item index="6" class="java.lang.String" itemvalue="pillow" />
|
||||
<item index="7" class="java.lang.String" itemvalue="numpy" />
|
||||
<item index="8" class="java.lang.String" itemvalue="requests" />
|
||||
<item index="9" class="java.lang.String" itemvalue="easydict" />
|
||||
<item index="10" class="java.lang.String" itemvalue="imutil" />
|
||||
<item index="11" class="java.lang.String" itemvalue="pkg-resources" />
|
||||
<item index="12" class="java.lang.String" itemvalue="hyperpyyaml" />
|
||||
<item index="13" class="java.lang.String" itemvalue="pytest" />
|
||||
<item index="14" class="java.lang.String" itemvalue="yamllint" />
|
||||
<item index="15" class="java.lang.String" itemvalue="sentencepiece" />
|
||||
<item index="16" class="java.lang.String" itemvalue="black" />
|
||||
<item index="17" class="java.lang.String" itemvalue="pre-commit" />
|
||||
<item index="18" class="java.lang.String" itemvalue="pycodestyle" />
|
||||
<item index="19" class="java.lang.String" itemvalue="torch" />
|
||||
<item index="20" class="java.lang.String" itemvalue="torchaudio" />
|
||||
<item index="21" class="java.lang.String" itemvalue="flake8" />
|
||||
<item index="22" class="java.lang.String" itemvalue="huggingface_hub" />
|
||||
<item index="23" class="java.lang.String" itemvalue="numba" />
|
||||
<item index="24" class="java.lang.String" itemvalue="librosa" />
|
||||
<item index="25" class="java.lang.String" itemvalue="visualdl" />
|
||||
<item index="26" class="java.lang.String" itemvalue="pyaudio" />
|
||||
<item index="27" class="java.lang.String" itemvalue="joblib" />
|
||||
<item index="28" class="java.lang.String" itemvalue="packaging" />
|
||||
<item index="29" class="java.lang.String" itemvalue="opencv-python" />
|
||||
<item index="30" class="java.lang.String" itemvalue="pytorch-lightning" />
|
||||
<item index="31" class="java.lang.String" itemvalue="scikit-learn" />
|
||||
<item index="32" class="java.lang.String" itemvalue="speechbrain" />
|
||||
<item index="33" class="java.lang.String" itemvalue="webrtcvad" />
|
||||
<item index="34" class="java.lang.String" itemvalue="yacs" />
|
||||
<item index="35" class="java.lang.String" itemvalue="prefetch_generator" />
|
||||
<item index="36" class="java.lang.String" itemvalue="pathspec" />
|
||||
<item index="37" class="java.lang.String" itemvalue="addict" />
|
||||
<item index="38" class="java.lang.String" itemvalue="loguru" />
|
||||
<item index="39" class="java.lang.String" itemvalue="glog" />
|
||||
<item index="40" class="java.lang.String" itemvalue="mmcv" />
|
||||
<item index="41" class="java.lang.String" itemvalue="Shapely" />
|
||||
<item index="42" class="java.lang.String" itemvalue="yapf" />
|
||||
<item index="43" class="java.lang.String" itemvalue="cityscapesscripts" />
|
||||
<item index="44" class="java.lang.String" itemvalue="asynctest" />
|
||||
<item index="45" class="java.lang.String" itemvalue="albumentations" />
|
||||
<item index="46" class="java.lang.String" itemvalue="imagecorruptions" />
|
||||
<item index="47" class="java.lang.String" itemvalue="pytest-runner" />
|
||||
<item index="48" class="java.lang.String" itemvalue="pytest-cov" />
|
||||
<item index="49" class="java.lang.String" itemvalue="isort" />
|
||||
<item index="50" class="java.lang.String" itemvalue="kwarray" />
|
||||
<item index="51" class="java.lang.String" itemvalue="xdoctest" />
|
||||
<item index="52" class="java.lang.String" itemvalue="codecov" />
|
||||
<item index="53" class="java.lang.String" itemvalue="ubelt" />
|
||||
<item index="54" class="java.lang.String" itemvalue="Pillow" />
|
||||
<item index="55" class="java.lang.String" itemvalue="terminaltables" />
|
||||
<item index="56" class="java.lang.String" itemvalue="tensorflow" />
|
||||
<item index="57" class="java.lang.String" itemvalue="tensorflow_gpu" />
|
||||
<item index="58" class="java.lang.String" itemvalue="scikit_learn" />
|
||||
<item index="59" class="java.lang.String" itemvalue="matplotlib" />
|
||||
<item index="60" class="java.lang.String" itemvalue="opencv_contrib_python" />
|
||||
<item index="61" class="java.lang.String" itemvalue="tensorboard" />
|
||||
<item index="62" class="java.lang.String" itemvalue="seaborn" />
|
||||
<item index="63" class="java.lang.String" itemvalue="h5py" />
|
||||
<item index="64" class="java.lang.String" itemvalue="torchvision" />
|
||||
<item index="65" class="java.lang.String" itemvalue="opencv_python" />
|
||||
<item index="66" class="java.lang.String" itemvalue="pytorch" />
|
||||
<item index="67" class="java.lang.String" itemvalue="opencv" />
|
||||
</list>
|
||||
</value>
|
||||
</option>
|
||||
</inspection_tool>
|
||||
</profile>
|
||||
</component>
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
<component name="InspectionProjectProfileManager">
|
||||
<settings>
|
||||
<option name="USE_PROJECT_PROFILE" value="false" />
|
||||
<version value="1.0" />
|
||||
</settings>
|
||||
</component>
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2019
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
130
README.md
|
|
@ -1,3 +1,129 @@
|
|||
# Retinaface_3point_Detection
|
||||
# RetinaFace in PyTorch
|
||||
|
||||
二维码三点方向检测 (QR-code three-point orientation detection)
|
||||
A [PyTorch](https://pytorch.org/) implementation of [RetinaFace: Single-stage Dense Face Localisation in the Wild](https://arxiv.org/abs/1905.00641). Model size only 1.7M, when Retinaface use mobilenet0.25 as backbone net. We also provide resnet50 as backbone net to get better result. The official code in Mxnet can be found [here](https://github.com/deepinsight/insightface/tree/master/RetinaFace).
|
||||
|
||||
## Mobile or Edge device deploy
|
||||
We also provide a set of Face Detector for edge device in [here](https://github.com/biubug6/Face-Detector-1MB-with-landmark) from python training to C++ inference.
|
||||
|
||||
## WiderFace Val Performance in single scale When using Resnet50 as backbone net.
|
||||
| Style | easy | medium | hard |
|
||||
|:-|:-:|:-:|:-:|
|
||||
| Pytorch (same parameter with Mxnet) | 94.82 % | 93.84% | 89.60% |
|
||||
| Pytorch (original image scale) | 95.48% | 94.04% | 84.43% |
|
||||
| Mxnet | 94.86% | 93.87% | 88.33% |
|
||||
| Mxnet(original image scale) | 94.97% | 93.89% | 82.27% |
|
||||
|
||||
## WiderFace Val Performance in single scale When using Mobilenet0.25 as backbone net.
|
||||
| Style | easy | medium | hard |
|
||||
|:-|:-:|:-:|:-:|
|
||||
| Pytorch (same parameter with Mxnet) | 88.67% | 87.09% | 80.99% |
|
||||
| Pytorch (original image scale) | 90.70% | 88.16% | 73.82% |
|
||||
| Mxnet | 88.72% | 86.97% | 79.19% |
|
||||
| Mxnet(original image scale) | 89.58% | 87.11% | 69.12% |
|
||||
<p align="center"><img src="curve/Widerface.jpg" width="640"\></p>
|
||||
|
||||
## FDDB Performance.
|
||||
| FDDB(pytorch) | performance |
|
||||
|:-|:-:|
|
||||
| Mobilenet0.25 | 98.64% |
|
||||
| Resnet50 | 99.22% |
|
||||
<p align="center"><img src="curve/FDDB.png" width="640"\></p>
|
||||
|
||||
### Contents
|
||||
- [Installation](#installation)
|
||||
- [Training](#training)
|
||||
- [Evaluation](#evaluation)
|
||||
- [TensorRT](#tensorrt)
|
||||
- [References](#references)
|
||||
|
||||
## Installation
|
||||
##### Clone and install
|
||||
1. git clone https://github.com/biubug6/Pytorch_Retinaface.git
|
||||
|
||||
2. Pytorch version 1.1.0+ and torchvision 0.3.0+ are needed.
|
||||
|
||||
3. The code is based on Python 3
|
||||
|
||||
##### Data
|
||||
1. Download the [WIDERFACE](http://shuoyang1213.me/WIDERFACE/WiderFace_Results.html) dataset.
|
||||
|
||||
2. Download annotations (face bounding boxes & five facial landmarks) from [baidu cloud](https://pan.baidu.com/s/1Laby0EctfuJGgGMgRRgykA) or [dropbox](https://www.dropbox.com/s/7j70r3eeepe4r2g/retinaface_gt_v1.1.zip?dl=0)
|
||||
|
||||
3. Organise the dataset directory as follows:
|
||||
|
||||
```Shell
|
||||
./data/widerface/
|
||||
train/
|
||||
images/
|
||||
label.txt
|
||||
val/
|
||||
images/
|
||||
wider_val.txt
|
||||
```
|
||||
PS: wider_val.txt only includes val file names, not label information.
|
||||
|
||||
##### Data1
|
||||
We also provide the organized dataset we used as in the above directory structure.
|
||||
|
||||
Link: from [google cloud](https://drive.google.com/open?id=11UGV3nbVv1x9IC--_tK3Uxf7hA6rlbsS) or [baidu cloud](https://pan.baidu.com/s/1jIp9t30oYivrAvrgUgIoLQ) Password: ruck
|
||||
|
||||
## Training
|
||||
We provide resnet50 and mobilenet0.25 as backbone networks to train the model.
|
||||
We trained Mobilenet0.25 on imagenet dataset and get 46.58% in top 1. If you do not wish to train the model, we also provide trained model. Pretrain model and trained model are put in [google cloud](https://drive.google.com/open?id=1oZRSG0ZegbVkVwUd8wUIQx8W7yfZ_ki1) and [baidu cloud](https://pan.baidu.com/s/12h97Fy1RYuqMMIV-RpzdPg) Password: fstq . The model could be put as follows:
|
||||
```Shell
|
||||
./weights/
|
||||
mobilenet0.25_Final.pth
|
||||
mobilenetV1X0.25_pretrain.tar
|
||||
Resnet50_Final.pth
|
||||
```
|
||||
1. Before training, you can check network configuration (e.g. batch_size, min_sizes and steps etc..) in ``data/config.py and train.py``.
|
||||
|
||||
2. Train the model using WIDER FACE:
|
||||
```Shell
|
||||
CUDA_VISIBLE_DEVICES=0,1,2,3 python train.py --network resnet50 or
|
||||
CUDA_VISIBLE_DEVICES=0 python train.py --network mobile0.25
|
||||
```
|
||||
|
||||
|
||||
## Evaluation
|
||||
### Evaluation widerface val
|
||||
1. Generate txt file
|
||||
```Shell
|
||||
python test_widerface.py --trained_model weight_file --network mobile0.25 or resnet50
|
||||
```
|
||||
2. Evaluate txt results. Demo come from [Here](https://github.com/wondervictor/WiderFace-Evaluation)
|
||||
```Shell
|
||||
cd ./widerface_evaluate
|
||||
python setup.py build_ext --inplace
|
||||
python evaluation.py
|
||||
```
|
||||
3. You can also use widerface official Matlab evaluate demo in [Here](http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/WiderFace_Results.html)
|
||||
### Evaluation FDDB
|
||||
|
||||
1. Download the images [FDDB](https://drive.google.com/open?id=17t4WULUDgZgiSy5kpCax4aooyPaz3GQH) to:
|
||||
```Shell
|
||||
./data/FDDB/images/
|
||||
```
|
||||
|
||||
2. Evaluate the trained model using:
|
||||
```Shell
|
||||
python test_fddb.py --trained_model weight_file --network mobile0.25 or resnet50
|
||||
```
|
||||
|
||||
3. Download [eval_tool](https://bitbucket.org/marcopede/face-eval) to evaluate the performance.
|
||||
|
||||
<p align="center"><img src="curve/1.jpg" width="640"\></p>
|
||||
|
||||
## TensorRT
|
||||
-[TensorRT](https://github.com/wang-xinyu/tensorrtx/tree/master/retinaface)
|
||||
|
||||
## References
|
||||
- [FaceBoxes](https://github.com/zisianw/FaceBoxes.PyTorch)
|
||||
- [Retinaface (mxnet)](https://github.com/deepinsight/insightface/tree/master/RetinaFace)
|
||||
```
|
||||
@inproceedings{deng2019retinaface,
|
||||
title={RetinaFace: Single-stage Dense Face Localisation in the Wild},
|
||||
author={Deng, Jiankang and Guo, Jia and Yuxiang, Zhou and Jinke Yu and Irene Kotsia and Zafeiriou, Stefanos},
|
||||
booktitle={arxiv},
|
||||
year={2019}
}
|
||||
```
|
||||
|
|
|
|||
|
|
@ -0,0 +1,88 @@
|
|||
from __future__ import print_function
|
||||
import os
|
||||
import argparse
|
||||
import torch
|
||||
import torch.backends.cudnn as cudnn
|
||||
import numpy as np
|
||||
from data import cfg_mnet, cfg_re50
|
||||
from layers.functions.prior_box import PriorBox
|
||||
from utils.nms.py_cpu_nms import py_cpu_nms
|
||||
import cv2
|
||||
from models.retinaface import RetinaFace
|
||||
from utils.box_utils import decode, decode_landm
|
||||
from utils.timer import Timer
|
||||
|
||||
|
||||
# Command-line interface for the ONNX export script.
parser = argparse.ArgumentParser(description='Test')
parser.add_argument('-m', '--trained_model', default='./weights/mobilenet0.25_Final.pth',
                    type=str, help='Trained state_dict file path to open')
parser.add_argument('--network', default='mobile0.25', help='Backbone network mobile0.25 or resnet50')
# type=int is required: without it a CLI-supplied value (e.g. --long_side 320)
# is parsed as the string '320' and later crashes torch.randn(...).
parser.add_argument('--long_side', default=640, type=int,
                    help='when origin_size is false, long_side is scaled size(320 or 640 for long side)')
parser.add_argument('--cpu', action="store_true", default=True, help='Use cpu inference')

args = parser.parse_args()
|
||||
|
||||
|
||||
def check_keys(model, pretrained_state_dict):
    """Report the overlap between a checkpoint's keys and the model's keys.

    Prints counts of missing, unused, and matched parameter keys, asserts
    that at least one checkpoint key maps onto the model, and returns True
    when the check passes.
    """
    checkpoint_keys = set(pretrained_state_dict.keys())
    expected_keys = set(model.state_dict().keys())
    matched_keys = expected_keys & checkpoint_keys
    print('Missing keys:{}'.format(len(expected_keys - checkpoint_keys)))
    print('Unused checkpoint keys:{}'.format(len(checkpoint_keys - expected_keys)))
    print('Used keys:{}'.format(len(matched_keys)))
    assert len(matched_keys) > 0, 'load NONE from pretrained checkpoint'
    return True
|
||||
|
||||
|
||||
def remove_prefix(state_dict, prefix):
    ''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
    print('remove prefix \'{}\''.format(prefix))
    stripped = {}
    for key, value in state_dict.items():
        # Drop the leading prefix (typically added by DataParallel) when present.
        new_key = key[len(prefix):] if key.startswith(prefix) else key
        stripped[new_key] = value
    return stripped
|
||||
|
||||
|
||||
def load_model(model, pretrained_path, load_to_cpu):
    """Load weights from ``pretrained_path`` into ``model`` and return it.

    Accepts checkpoints saved either as a raw state dict or wrapped under a
    'state_dict' key, strips any DataParallel 'module.' prefix, verifies key
    overlap via check_keys, and loads non-strictly so extra/missing keys do
    not raise.
    """
    print('Loading pretrained model from {}'.format(pretrained_path))
    if load_to_cpu:
        checkpoint = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
    else:
        # Map tensors onto the currently selected CUDA device.
        gpu_id = torch.cuda.current_device()
        checkpoint = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(gpu_id))
    if "state_dict" in checkpoint.keys():
        weights = remove_prefix(checkpoint['state_dict'], 'module.')
    else:
        weights = remove_prefix(checkpoint, 'module.')
    check_keys(model, weights)
    model.load_state_dict(weights, strict=False)
    return model
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Export-only script: no gradients are ever needed.
    torch.set_grad_enabled(False)

    # Select the backbone configuration matching the requested network.
    cfg = None
    if args.network == "mobile0.25":
        cfg = cfg_mnet
    elif args.network == "resnet50":
        cfg = cfg_re50

    # net and model
    net = RetinaFace(cfg=cfg, phase='test')
    net = load_model(net, args.trained_model, args.cpu)
    net.eval()
    print('Finished loading model!')
    print(net)
    device = torch.device("cpu" if args.cpu else "cuda")
    net = net.to(device)

    # ------------------------ export -----------------------------
    output_onnx = 'FaceDetector.onnx'
    print("==> Exporting model to ONNX format at '{}'".format(output_onnx))
    input_names = ["input0"]
    output_names = ["output0"]
    # Dummy square input at the configured resolution; tracing fixes this shape
    # in the exported graph.
    inputs = torch.randn(1, 3, args.long_side, args.long_side).to(device)

    # Use the public torch.onnx.export API: torch.onnx._export is a private
    # symbol that has been removed in recent PyTorch releases.
    torch.onnx.export(net, inputs, output_onnx, export_params=True, verbose=False,
                      input_names=input_names, output_names=output_names)
|
||||
|
||||
|
||||
|
After Width: | Height: | Size: 68 KiB |
|
After Width: | Height: | Size: 82 KiB |
|
After Width: | Height: | Size: 78 KiB |
|
After Width: | Height: | Size: 74 KiB |
|
After Width: | Height: | Size: 77 KiB |
|
After Width: | Height: | Size: 72 KiB |
|
After Width: | Height: | Size: 74 KiB |
|
After Width: | Height: | Size: 80 KiB |
|
After Width: | Height: | Size: 74 KiB |
|
After Width: | Height: | Size: 72 KiB |
|
After Width: | Height: | Size: 65 KiB |
|
After Width: | Height: | Size: 58 KiB |
|
After Width: | Height: | Size: 66 KiB |
|
After Width: | Height: | Size: 60 KiB |
|
After Width: | Height: | Size: 60 KiB |
|
After Width: | Height: | Size: 57 KiB |
|
After Width: | Height: | Size: 61 KiB |
|
After Width: | Height: | Size: 57 KiB |
|
After Width: | Height: | Size: 62 KiB |
|
After Width: | Height: | Size: 62 KiB |
|
After Width: | Height: | Size: 56 KiB |
|
After Width: | Height: | Size: 57 KiB |
|
After Width: | Height: | Size: 56 KiB |
|
After Width: | Height: | Size: 56 KiB |
|
After Width: | Height: | Size: 57 KiB |
|
After Width: | Height: | Size: 58 KiB |
|
After Width: | Height: | Size: 59 KiB |
|
After Width: | Height: | Size: 57 KiB |
|
After Width: | Height: | Size: 59 KiB |
|
After Width: | Height: | Size: 57 KiB |
|
After Width: | Height: | Size: 56 KiB |
|
After Width: | Height: | Size: 58 KiB |
|
After Width: | Height: | Size: 57 KiB |
|
After Width: | Height: | Size: 58 KiB |
|
After Width: | Height: | Size: 57 KiB |
|
After Width: | Height: | Size: 58 KiB |
|
After Width: | Height: | Size: 57 KiB |
|
After Width: | Height: | Size: 57 KiB |
|
After Width: | Height: | Size: 56 KiB |
|
After Width: | Height: | Size: 57 KiB |
|
After Width: | Height: | Size: 63 KiB |
|
After Width: | Height: | Size: 62 KiB |
|
After Width: | Height: | Size: 63 KiB |
|
After Width: | Height: | Size: 59 KiB |
|
After Width: | Height: | Size: 59 KiB |
|
After Width: | Height: | Size: 64 KiB |
|
After Width: | Height: | Size: 66 KiB |
|
After Width: | Height: | Size: 64 KiB |
|
After Width: | Height: | Size: 60 KiB |
|
After Width: | Height: | Size: 60 KiB |
|
After Width: | Height: | Size: 61 KiB |
|
After Width: | Height: | Size: 61 KiB |
|
After Width: | Height: | Size: 62 KiB |
|
After Width: | Height: | Size: 60 KiB |
|
After Width: | Height: | Size: 66 KiB |
|
After Width: | Height: | Size: 59 KiB |
|
After Width: | Height: | Size: 62 KiB |
|
After Width: | Height: | Size: 62 KiB |
|
After Width: | Height: | Size: 63 KiB |
|
After Width: | Height: | Size: 68 KiB |
|
After Width: | Height: | Size: 85 KiB |
|
After Width: | Height: | Size: 74 KiB |
|
After Width: | Height: | Size: 68 KiB |
|
After Width: | Height: | Size: 78 KiB |
|
After Width: | Height: | Size: 76 KiB |
|
After Width: | Height: | Size: 85 KiB |
|
After Width: | Height: | Size: 71 KiB |
|
After Width: | Height: | Size: 73 KiB |
|
After Width: | Height: | Size: 82 KiB |
|
After Width: | Height: | Size: 68 KiB |
|
After Width: | Height: | Size: 77 KiB |
|
After Width: | Height: | Size: 77 KiB |
|
After Width: | Height: | Size: 76 KiB |
|
After Width: | Height: | Size: 79 KiB |
|
After Width: | Height: | Size: 74 KiB |
|
After Width: | Height: | Size: 72 KiB |
|
After Width: | Height: | Size: 85 KiB |
|
After Width: | Height: | Size: 73 KiB |
|
After Width: | Height: | Size: 77 KiB |
|
After Width: | Height: | Size: 79 KiB |
|
After Width: | Height: | Size: 67 KiB |
|
After Width: | Height: | Size: 67 KiB |
|
After Width: | Height: | Size: 60 KiB |
|
After Width: | Height: | Size: 63 KiB |
|
After Width: | Height: | Size: 62 KiB |
|
After Width: | Height: | Size: 68 KiB |
|
After Width: | Height: | Size: 62 KiB |
|
After Width: | Height: | Size: 66 KiB |
|
After Width: | Height: | Size: 60 KiB |
|
After Width: | Height: | Size: 64 KiB |
|
After Width: | Height: | Size: 57 KiB |
|
After Width: | Height: | Size: 56 KiB |
|
After Width: | Height: | Size: 59 KiB |
|
After Width: | Height: | Size: 58 KiB |
|
After Width: | Height: | Size: 58 KiB |