uncertainty_estimate.py
import torch
import numpy as np
import os
from collections import Counter


def summary_class(data,
                  n_classes=10,
                  out_dir="./stat",
                  note="",
                  write_mode='a+',
                  targets=None):
    '''
    Creates or appends to a summary file based on the input data.
    One file is written per position in the minibatch; therefore the
    order of the data must stay fixed and not be shuffled across iterations.
    File row format: Median; Mean; Std; Lb, Rb; frequencies 1:n_classes; target
    input:
        data - first dimension: number of testing iterations = samples
               second dimension: minibatch
               values: index of the class with the maximum score
    '''
    os.makedirs(out_dir, exist_ok=True)
    predicted = []
    data = data.data.cpu().numpy()
    for i in range(data.shape[1]):
        summary_file_path = "%s/%d%s.txt" % (out_dir, i, note)
        data_ = data[:, i]  # predictions for one example across all iterations
        # Statistics over the test iterations
        mean = data_.mean()
        median = np.median(data_)
        std = data_.std()
        lb, rb = mean - 1.96 * std, mean + 1.96 * std  # 95% interval bounds
        counter = Counter(data_.tolist())
        freq = [0] * n_classes
        for j in range(n_classes):
            freq[j] = counter[j]  # / len(data_)
        # Write the summary line to the per-example file
        target_str = str(
            targets[i].data.cpu().numpy()) if targets is not None else ''
        with open(summary_file_path, write_mode) as summary_file:
            summary_file.write(
                "%.6f;%.6f;%.6f;%.6f,%.6f;%s;%s\n" %
                (median, mean, std, lb, rb, ','.join(map(str, freq)), target_str))
        # Predicted value for this example: the most frequent class
        predicted.append(np.argmax(freq))
    return torch.tensor(predicted, dtype=torch.int64).reshape(1, data.shape[1])
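# Usage sketch (illustrative only; `_demo_summary_class` is a hypothetical
# helper added for documentation, not part of the original module). It assumes
# `data` holds class indices collected over several stochastic forward passes,
# e.g. MC-dropout test iterations, stacked into shape [n_test_iter, batch_size].
def _demo_summary_class():
    n_test_iter, batch_size = 20, 4
    preds = torch.randint(0, 10, (n_test_iter, batch_size))  # fake argmax indices
    targets = torch.randint(0, 10, (batch_size,))            # fake labels
    majority = summary_class(preds, n_classes=10, out_dir="./stat",
                             note="_demo", targets=targets)
    print(majority)  # shape [1, batch_size]: majority-vote class per example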
def get_prediction_class(data, n_classes=10, is_score=False, threshold=0.5):
    '''
    Returns the most common class in data per entry among the n_test_iter runs.
    data - first dimension: number of testing iterations = samples
           second dimension: minibatch
           values: index of the class with the maximum score
    '''
    predicted = []
    if is_score:
        # Binarize scores: shift so that torch.round cuts at `threshold`
        data = torch.round(data - (threshold - 0.5))
    data = data.data.cpu().numpy()
    if data.shape[0] == 1:
        # Single test iteration: nothing to vote over
        return torch.tensor(data, dtype=torch.int64).reshape(1, data.shape[1])
    for i in range(data.shape[1]):
        data_ = data[:, i]  # predictions for one example across all iterations
        # Frequency of each class across the test iterations
        counter = Counter(data_.tolist())
        freq = [0] * n_classes
        for j in range(n_classes):
            freq[j] = counter[j]
        # Predicted value for this example: the majority vote
        predicted.append(np.argmax(freq))
    return torch.tensor(predicted, dtype=torch.int64).reshape(1, data.shape[1])
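# Usage sketch (illustrative; the tensors below are made-up examples).
def _demo_get_prediction_class():
    # Class-index input: majority vote over 5 test iterations, batch of 2
    preds = torch.tensor([[0, 1], [0, 1], [2, 1], [0, 3], [0, 1]])
    print(get_prediction_class(preds, n_classes=4))  # tensor([[0, 1]])
    # Score input (binary case): binarized at `threshold` before voting
    scores = torch.tensor([[0.2, 0.9], [0.4, 0.8], [0.1, 0.7]])
    print(get_prediction_class(scores, n_classes=2, is_score=True,
                               threshold=0.5))       # tensor([[0, 1]])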
def get_norm_parameters(net):
    '''
    Returns the l2 norm of each group of posterior ("post_*") parameters.
    '''
    vals = {}
    # Unique attribute names of trainable posterior parameters,
    # e.g. post_mean, post_var, and so on
    uniq_par = set([
        name.split('.')[-1] for name, p in net.named_parameters()
        if p.requires_grad and "post_" in name
    ])
    for key in uniq_par:
        vals[key] = 0
    for name, par in net.named_parameters():
        if par.requires_grad:
            var = name.split(".")[-1]  # attribute name: post_mean, post_var, ...
            if "post_" not in var:
                continue
            val = par.detach().cpu().numpy()
            vals[var] += (val ** 2).sum()  # accumulate squared entries per group
    for key in vals.keys():
        vals[key] = np.sqrt(vals[key])  # finish the l2 norm
    return vals
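# Usage sketch (illustrative; `TinyBayesLayer` is a hypothetical module whose
# parameter names contain "post_", as in variational layers that store
# posterior means and log-variances).
def _demo_get_norm_parameters():
    class TinyBayesLayer(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.post_mean = torch.nn.Parameter(torch.randn(3, 3))
            self.post_logvar = torch.nn.Parameter(torch.zeros(3, 3))
    net = TinyBayesLayer()
    print(get_norm_parameters(net))  # e.g. {'post_mean': 2.87, 'post_logvar': 0.0}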
def summary_parameters(net, out_dir="./stat", note="", write_mode='a+'):
    '''
    Appends the l2 norm of each posterior parameter group to its own file.
    '''
    os.makedirs(out_dir, exist_ok=True)
    w_norm = get_norm_parameters(net)
    for param in w_norm.keys():
        summary_file_path = "%s/%s%s.txt" % (out_dir, param, note)
        # Append the norm to the per-parameter summary file
        with open(summary_file_path, write_mode) as summary_file:
            summary_file.write("%f\n" % w_norm[param])
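# Usage sketch (illustrative; reuses the hypothetical TinyBayesLayer pattern
# from the sketch above): each call appends one norm per "post_*" group to
# "./stat/<param><note>.txt", e.g. once per training epoch.
def _demo_summary_parameters():
    class TinyBayesLayer(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.post_mean = torch.nn.Parameter(torch.randn(3, 3))
    summary_parameters(TinyBayesLayer(), out_dir="./stat", note="_demo")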