justinkay committed
Commit: 046c99d · Parent: 090bc1e

Dynamic upper right window height
app.py CHANGED
@@ -770,7 +770,7 @@ with gr.Blocks(title="CODA: Wildlife Photo Classification Challenge",
     That is, CODA uses predictions from candidate models to guide the labeling process, querying you (a species identification expert)
     for labels on a select few images that will most efficiently differentiate between your candidate machine learning models.
 
-    This demo lets you try CODA yourself! First, become a species identification expert by reading our classification guide
+    This demo lets you try CODA yourself! First, **become a species identification expert by reading our classification guide**
     so that you will be equipped to provide ground truth labels. Then, watch as CODA narrows down the best model over time
     as you provide labels for the query images. You will see that with your input CODA is able to identify the best model candidate
     with as few as ten (correctly) labeled images.
@@ -896,8 +896,10 @@ with gr.Blocks(title="CODA: Wildlife Photo Classification Challenge",
                 )
             with gr.Group(visible=True, elem_classes="subtle-outline") as hidden_group:
                 with gr.Column(elem_classes="flex items-center justify-center h-full"):
-
-
+
+                    # example of how to add spacing:
+                    # gr.HTML("<div style='margin-top: 2.9em;'></div>")
+
                     hidden_text0 = gr.Markdown("""
                     # True model performance is hidden
                     """,
@@ -915,19 +917,26 @@ with gr.Blocks(title="CODA: Wildlife Photo Classification Challenge",
                 )
                 gr.HTML("<div style='margin-top: 0.25em;'></div>")
 
+                # with gr.Row():
+                #     with gr.Column(scale=2):
+                #         pass
+                #     with gr.Column(scale=1, min_width=100):
+                #         reveal_accuracy_button = gr.Button(
+                #             "🔍 Reveal",
+                #             variant="secondary",
+                #             size="lg"
+                #         )
+                #     with gr.Column(scale=2):
+                #         pass
                 with gr.Row():
-
-
-                    with gr.Column(scale=2, min_width=200):
-                        reveal_accuracy_button = gr.Button(
-                            "🔍 Reveal Model Accuracies",
+                    reveal_accuracy_button = gr.Button(
+                        "🔍 Reveal True Model Accuracies",
                         variant="secondary",
                         size="lg"
                     )
-                    with gr.Column(scale=2):
-                        pass
 
-
+                # example of how to add spacing:
+                # gr.HTML("<div style='margin-top: 2.9em;'></div>")
 
 
                 # Status display with help button
@@ -983,32 +992,44 @@ with gr.Blocks(title="CODA: Wildlife Photo Classification Challenge",
         # Reset the demo state
         iteration_count = 0
 
-        #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # Keep resampling until we get a subset where BioCLIP2 is NOT the initial best model
+        while True:
+            # Subsample dataset for this user
+            subsampled_indices = []
+            for class_idx in sorted(full_class_to_indices.keys()):
+                indices = full_class_to_indices[class_idx]
+                sampled = np.random.choice(indices, size=min_class_size, replace=False)
+                subsampled_indices.extend(sampled.tolist())
+
+            # Sort indices to maintain order
+            subsampled_indices.sort()
+
+            # Create subsampled dataset for this user
+            subsampled_preds = full_preds[:, subsampled_indices, :]
+            subsampled_labels = full_labels[subsampled_indices]
+            image_filenames = [full_image_filenames[idx] for idx in subsampled_indices]
+
+            # Create Dataset object with subsampled data
+            dataset = Dataset.__new__(Dataset)
+            dataset.preds = subsampled_preds
+            dataset.labels = subsampled_labels
+            dataset.device = device
+
+            # Create oracle and CODA selector for this user
+            oracle = Oracle(dataset, loss_fn=loss_fn)
+            coda_selector = CODA(dataset,
+                                 learning_rate=DEMO_LEARNING_RATE,
+                                 alpha=DEMO_ALPHA)
+
+            # Check which model is initially best
+            probs_tensor = coda_selector.get_pbest()
+            probabilities = probs_tensor.detach().cpu().numpy().flatten()
+            best_idx = np.argmax(probabilities)
+
+            # BioCLIP2 is at index 3 - if it's not the best, we accept this subset
+            if best_idx != 3:
+                break
+            # Otherwise, loop and resample
 
         image, status, predictions = get_next_coda_image()
         prob_plot = create_probability_chart()
@@ -1023,32 +1044,44 @@ with gr.Blocks(title="CODA: Wildlife Photo Classification Challenge",
         # Reset the demo state
         iteration_count = 0
 
-        #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # Keep resampling until we get a subset where BioCLIP2 is NOT the initial best model
+        while True:
+            # Subsample dataset for this user (new random subsample)
+            subsampled_indices = []
+            for class_idx in sorted(full_class_to_indices.keys()):
+                indices = full_class_to_indices[class_idx]
+                sampled = np.random.choice(indices, size=min_class_size, replace=False)
+                subsampled_indices.extend(sampled.tolist())
+
+            # Sort indices to maintain order
+            subsampled_indices.sort()
+
+            # Create subsampled dataset for this user
+            subsampled_preds = full_preds[:, subsampled_indices, :]
+            subsampled_labels = full_labels[subsampled_indices]
+            image_filenames = [full_image_filenames[idx] for idx in subsampled_indices]
+
+            # Create Dataset object with subsampled data
+            dataset = Dataset.__new__(Dataset)
+            dataset.preds = subsampled_preds
+            dataset.labels = subsampled_labels
+            dataset.device = device
+
+            # Create oracle and CODA selector for this user
+            oracle = Oracle(dataset, loss_fn=loss_fn)
+            coda_selector = CODA(dataset,
+                                 learning_rate=DEMO_LEARNING_RATE,
+                                 alpha=DEMO_ALPHA)
+
+            # Check which model is initially best
+            probs_tensor = coda_selector.get_pbest()
+            probabilities = probs_tensor.detach().cpu().numpy().flatten()
+            best_idx = np.argmax(probabilities)
+
+            # BioCLIP2 is at index 3 - if it's not the best, we accept this subset
+            if best_idx != 3:
+                break
+            # Otherwise, loop and resample
 
         # Reset all displays
         prob_plot = create_probability_chart()
@@ -1093,12 +1126,128 @@ with gr.Blocks(title="CODA: Wildlife Photo Classification Challenge",
 
     popup_start_button.click(
         fn=start_demo,
-        outputs=[image_display, status_with_help, model_predictions_display, prob_plot, accuracy_plot, popup_overlay, result_display, selection_help_button]
+        outputs=[image_display, status_with_help, model_predictions_display, prob_plot, accuracy_plot, popup_overlay, result_display, selection_help_button],
+        js="""
+        () => {
+            console.log('=== Panel Height Matching (Dynamic) ===');
+
+            function matchPanelHeights() {
+                const panels = document.querySelectorAll('.panel-container');
+                console.log('Found .panel-container elements:', panels.length);
+                const leftPanel = panels[0]; // prob_plot panel
+                const rightPanel = document.querySelector('.subtle-outline'); // hidden_group panel
+
+                console.log('Left panel (prob):', leftPanel);
+                console.log('Right panel (hidden):', rightPanel);
+
+                if (leftPanel && rightPanel) {
+                    const leftHeight = leftPanel.offsetHeight;
+                    const rightHeight = rightPanel.offsetHeight;
+                    const diff = leftHeight - rightHeight;
+
+                    console.log('Left panel height:', leftHeight);
+                    console.log('Right panel height:', rightHeight);
+                    console.log('Height difference:', diff);
+
+                    if (diff > 0) {
+                        console.log('Setting right panel min-height to:', leftHeight + 'px');
+
+                        rightPanel.style.minHeight = leftHeight + 'px';
+                        rightPanel.style.display = 'flex';
+                        rightPanel.style.flexDirection = 'column';
+                        rightPanel.style.justifyContent = 'center';
+
+                        console.log('Applied min-height and flex centering');
+                        return true; // Success
+                    } else {
+                        console.log('No height adjustment needed (diff <= 0)');
+                        return true; // Success
+                    }
+                } else {
+                    console.log('Panels not ready yet');
+                    return false; // Not ready
+                }
+            }
+
+            // Check every 50ms for 3 seconds to catch multiple height changes
+            let attempts = 0;
+            const maxAttempts = 60; // 60 * 50ms = 3 seconds to catch both height changes
+            const checkInterval = setInterval(() => {
+                attempts++;
+                console.log('Attempt', attempts, 'to match heights');
+
+                matchPanelHeights(); // Always try, don't stop early
+
+                if (attempts >= maxAttempts) {
+                    console.log('Finished checking after 3 seconds');
+                    clearInterval(checkInterval);
+                }
+            }, 50); // Check every 50ms
+        }
+        """
     )
 
     start_over_button.click(
         fn=start_over,
-        outputs=[image_display, status_with_help, model_predictions_display, prob_plot, accuracy_plot, result_display, popup_overlay, selection_help_button]
+        outputs=[image_display, status_with_help, model_predictions_display, prob_plot, accuracy_plot, result_display, popup_overlay, selection_help_button],
+        js="""
+        () => {
+            console.log('=== Panel Height Matching (Dynamic - Start Over) ===');
+
+            function matchPanelHeights() {
+                const panels = document.querySelectorAll('.panel-container');
+                console.log('Found .panel-container elements:', panels.length);
+                const leftPanel = panels[0]; // prob_plot panel
+                const rightPanel = document.querySelector('.subtle-outline'); // hidden_group panel
+
+                console.log('Left panel (prob):', leftPanel);
+                console.log('Right panel (hidden):', rightPanel);
+
+                if (leftPanel && rightPanel) {
+                    const leftHeight = leftPanel.offsetHeight;
+                    const rightHeight = rightPanel.offsetHeight;
+                    const diff = leftHeight - rightHeight;
+
+                    console.log('Left panel height:', leftHeight);
+                    console.log('Right panel height:', rightHeight);
+                    console.log('Height difference:', diff);
+
+                    if (diff > 0) {
+                        console.log('Setting right panel min-height to:', leftHeight + 'px');
+
+                        rightPanel.style.minHeight = leftHeight + 'px';
+                        rightPanel.style.display = 'flex';
+                        rightPanel.style.flexDirection = 'column';
+                        rightPanel.style.justifyContent = 'center';
+
+                        console.log('Applied min-height and flex centering');
+                        return true; // Success
+                    } else {
+                        console.log('No height adjustment needed (diff <= 0)');
+                        return true; // Success
+                    }
+                } else {
+                    console.log('Panels not ready yet');
+                    return false; // Not ready
+                }
+            }
+
+            // Check every 50ms for 3 seconds to catch multiple height changes
+            let attempts = 0;
+            const maxAttempts = 60; // 60 * 50ms = 3 seconds to catch both height changes
+            const checkInterval = setInterval(() => {
+                attempts++;
+                console.log('Attempt', attempts, 'to match heights');
+
+                matchPanelHeights(); // Always try, don't stop early
+
+                if (attempts >= maxAttempts) {
+                    console.log('Finished checking after 3 seconds');
+                    clearInterval(checkInterval);
+                }
+            }, 50); // Check every 50ms
+        }
+        """
    )
 
    guide_button.click(
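The height fix in this commit hinges on Gradio's js= argument to an event listener, which runs a client-side snippet in the browser on the same click that triggers the Python callback. Below is a minimal, self-contained sketch of that pattern, not the app's code: the left-panel/right-panel classes, the 500 ms delay, and the fill_left helper are illustrative assumptions, and it presumes a Gradio version whose .click() accepts a js parameter (4.x). The real app instead targets the '.panel-container' and '.subtle-outline' elements shown in the diff.

# Minimal sketch (illustrative, not the app's code): attach client-side JS to a
# button click so a right-hand panel grows to match a left-hand panel's height.
import gradio as gr

# Hypothetical selector names; the app above uses '.panel-container' and '.subtle-outline'.
MATCH_HEIGHTS_JS = """
() => {
    // Wait briefly so the server-side update has rendered before measuring.
    setTimeout(() => {
        const left = document.querySelector('.left-panel');
        const right = document.querySelector('.right-panel');
        if (left && right && left.offsetHeight > right.offsetHeight) {
            right.style.minHeight = left.offsetHeight + 'px';
        }
    }, 500);
}
"""

def fill_left():
    # Return enough text to make the left panel taller than the right one.
    return "\n".join(f"line {i}" for i in range(30))

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(elem_classes="left-panel"):
            left_md = gr.Markdown("Left panel")
        with gr.Column(elem_classes="right-panel"):
            gr.Markdown("Right panel")
    grow = gr.Button("Grow left panel")
    # fn runs on the server and updates left_md; js runs in the browser on the same click.
    grow.click(fn=fill_left, outputs=left_md, js=MATCH_HEIGHTS_JS)

if __name__ == "__main__":
    demo.launch()

The commit itself polls every 50 ms for 3 seconds rather than waiting a single fixed delay, which is more robust when the panel height changes more than once as the outputs render.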