evaluate.py
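
"""Evaluate the VIDepth pipeline (pipeline.VIDepth) on the VOID test split.

For each test image, the script loads the sparse depth, validity map, and
ground-truth depth, runs the pipeline, and accumulates error metrics for the
globally-aligned (GA) depth and the SML-refined (GA+SML) depth, printing a
summary table at the end. (This docstring summarizes the code below.)
"""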
import os
import argparse

import torch
import imageio
import numpy as np
from tqdm import tqdm
from PIL import Image

import modules.midas.utils as utils
import pipeline
import metrics


def evaluate(dataset_path, depth_predictor, nsamples, sml_model_path):

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device: %s" % device)

    # ranges for VOID
    min_depth, max_depth = 0.2, 5.0
    min_pred, max_pred = 0.1, 8.0

    # instantiate method
    method = pipeline.VIDepth(
        depth_predictor, nsamples, sml_model_path,
        min_pred, max_pred, min_depth, max_depth, device
    )

    # get inputs
    with open(f"{dataset_path}/void_{nsamples}/test_image.txt") as f:
        test_image_list = [line.rstrip() for line in f]

    # initialize error aggregators
    avg_error_w_int_depth = metrics.ErrorMetricsAverager()
    avg_error_w_pred = metrics.ErrorMetricsAverager()

    # iterate through inputs list
    for i in tqdm(range(len(test_image_list))):

        # image
        input_image_fp = os.path.join(dataset_path, test_image_list[i])
        input_image = utils.read_image(input_image_fp)

        # sparse depth
        input_sparse_depth_fp = input_image_fp.replace("image", "sparse_depth")
        input_sparse_depth = np.array(Image.open(input_sparse_depth_fp), dtype=np.float32) / 256.0
        input_sparse_depth[input_sparse_depth <= 0] = 0.0

        # sparse depth validity map
        validity_map_fp = input_image_fp.replace("image", "validity_map")
        validity_map = np.array(Image.open(validity_map_fp), dtype=np.float32)
        assert np.all(np.unique(validity_map) == [0, 256])
        validity_map[validity_map > 0] = 1

        # target (ground truth) depth
        target_depth_fp = input_image_fp.replace("image", "ground_truth")
        target_depth = np.array(Image.open(target_depth_fp), dtype=np.float32) / 256.0
        target_depth[target_depth <= 0] = 0.0

        # target depth valid/mask
        mask = (target_depth < max_depth)
        if min_depth is not None:
            mask *= (target_depth > min_depth)
        target_depth[~mask] = np.inf  # set invalid depth
        target_depth = 1.0 / target_depth

        # run pipeline
        output = method.run(input_image, input_sparse_depth, validity_map, device)
        # compute error metrics using intermediate (globally aligned) depth
        error_w_int_depth = metrics.ErrorMetrics()
        error_w_int_depth.compute(
            estimate=output["ga_depth"],
            target=target_depth,
            valid=mask.astype(bool),
        )

        # compute error metrics using SML output depth
        error_w_pred = metrics.ErrorMetrics()
        error_w_pred.compute(
            estimate=output["sml_depth"],
            target=target_depth,
            valid=mask.astype(bool),
        )
        # accumulate error metrics
        avg_error_w_int_depth.accumulate(error_w_int_depth)
        avg_error_w_pred.accumulate(error_w_pred)

    # compute average error metrics
    print("Averaging metrics for globally-aligned depth over {} samples".format(
        avg_error_w_int_depth.total_count
    ))
    avg_error_w_int_depth.average()

    print("Averaging metrics for SML-aligned depth over {} samples".format(
        avg_error_w_pred.total_count
    ))
    avg_error_w_pred.average()

    from prettytable import PrettyTable
    summary_tb = PrettyTable()
    summary_tb.field_names = ["metric", "GA Only", "GA+SML"]
    summary_tb.add_row(["RMSE", f"{avg_error_w_int_depth.rmse_avg:7.2f}", f"{avg_error_w_pred.rmse_avg:7.2f}"])
    summary_tb.add_row(["MAE", f"{avg_error_w_int_depth.mae_avg:7.2f}", f"{avg_error_w_pred.mae_avg:7.2f}"])
    summary_tb.add_row(["AbsRel", f"{avg_error_w_int_depth.absrel_avg:8.3f}", f"{avg_error_w_pred.absrel_avg:8.3f}"])
    summary_tb.add_row(["iRMSE", f"{avg_error_w_int_depth.inv_rmse_avg:7.2f}", f"{avg_error_w_pred.inv_rmse_avg:7.2f}"])
    summary_tb.add_row(["iMAE", f"{avg_error_w_int_depth.inv_mae_avg:7.2f}", f"{avg_error_w_pred.inv_mae_avg:7.2f}"])
    summary_tb.add_row(["iAbsRel", f"{avg_error_w_int_depth.inv_absrel_avg:8.3f}", f"{avg_error_w_pred.inv_absrel_avg:8.3f}"])
    print(summary_tb)


if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument('-ds', '--dataset-path', type=str, default='/path/to/void_release/',
                        help='Path to VOID release dataset.')
    parser.add_argument('-dp', '--depth-predictor', type=str, default='midas_small',
                        help='Name of depth predictor to use in pipeline.')
    parser.add_argument('-ns', '--nsamples', type=int, default=150,
                        help='Number of sparse metric depth samples available.')
    parser.add_argument('-sm', '--sml-model-path', type=str, default='',
                        help='Path to trained SML model weights.')
    args = parser.parse_args()
    print(args)

    evaluate(
        args.dataset_path,
        args.depth_predictor,
        args.nsamples,
        args.sml_model_path,
    )
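
# Example invocation (a sketch; the dataset path and the SML checkpoint path
# below are placeholders and must point to a local VOID release and trained
# weights):
#
#   python evaluate.py \
#       --dataset-path /path/to/void_release/ \
#       --depth-predictor midas_small \
#       --nsamples 150 \
#       --sml-model-path /path/to/sml_weights.ckpt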