@@ -104,6 +104,50 @@ def inner(y, hy):
104104 ** kwargs )
105105
106106
@perf_docs
def top_k_accuracy_score(y_true, *y_pred, k=2,
                         normalize=True, sample_weight=None,
                         labels=None,
                         num_samples: int = 500,
                         n_jobs: int = -1,
                         use_tqdm=True,
                         **kwargs):
    """Top-k accuracy with bootstrap statistics.

    Wraps :py:func:`sklearn.metrics.top_k_accuracy_score` in a
    :py:class:`Perf` instance so the score is reported with its
    bootstrap standard error and systems can be compared pairwise.

    :param y_true: Gold-standard labels.
    :param y_pred: One or more score arrays (e.g. decision function or
        predicted probabilities); further systems may also be given as
        keyword arguments forwarded to :py:class:`Perf`.
    :param k: Number of top-scored candidates considered a hit.
    :param normalize: Return the fraction (True) or the count (False)
        of correctly classified samples.
    :param sample_weight: Per-sample weights.
    :param labels: Column-to-class mapping for the score arrays.
    :param num_samples: Number of bootstrap samples.
    :param n_jobs: Number of parallel jobs (-1 uses all cores).
    :param use_tqdm: Display a progress bar while bootstrapping.

    >>> from sklearn.svm import LinearSVC
    >>> from sklearn.ensemble import RandomForestClassifier
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.model_selection import train_test_split
    >>> from CompStats.metrics import top_k_accuracy_score
    >>> X, y = load_iris(return_X_y=True)
    >>> _ = train_test_split(X, y, test_size=0.3)
    >>> X_train, X_val, y_train, y_val = _
    >>> m = LinearSVC().fit(X_train, y_train)
    >>> hy = m.decision_function(X_val)
    >>> ens = RandomForestClassifier().fit(X_train, y_train)
    >>> score = top_k_accuracy_score(y_val, hy,
    ...                              forest=ens.predict_proba(X_val),
    ...                              n_jobs=1, labels=[0, 1, 2])
    >>> score
    <Perf>
    Prediction statistics with standard error
    forest = 0.957 (0.031)
    alg-1 = 0.935 (0.037)
    >>> diff = score.difference()
    >>> diff
    <Difference>
    difference p-values w.r.t forest
    alg-1 0.254
    """

    def inner(y, hy):
        # Freeze k / normalize / sample_weight / labels so Perf only
        # needs to supply (gold, prediction) pairs per bootstrap sample.
        return metrics.top_k_accuracy_score(y, hy, k=k,
                                            normalize=normalize,
                                            sample_weight=sample_weight,
                                            labels=labels)
    # Extra keyword arguments (including named systems) are forwarded
    # to Perf untouched.
    return Perf(y_true, *y_pred, score_func=inner,
                num_samples=num_samples, n_jobs=n_jobs,
                use_tqdm=use_tqdm,
                **kwargs)
149+
150+
107151@perf_docs
108152def f1_score (y_true , * y_pred , labels = None , pos_label = 1 ,
109153 average = 'binary' , sample_weight = None ,
0 commit comments