justinkay committed
Commit: f6adf18
1 Parent(s): e7063f6

    Change learning rate, small text changes
app.py CHANGED
@@ -58,6 +58,8 @@ MODEL_INFO = [
     {"org": "Imageomics", "name": "BioCLIP", "logo": "logos/imageomics.png"}
 ]

+DEMO_LEARNING_RATE = 0.05 # don't use default; use something more fun
+
 # load image metadata
 images_data = []
 for annotation in tqdm(data['annotations'], desc='Loading annotations'):
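Reviewer note: the new module-level constant trades the library default for a livelier demo, since a larger step makes each provided label shift the on-screen model probabilities more visibly. As rough intuition only (a minimal sketch assuming a multiplicative-weights style update over candidate models; this commit does not show CODA's internals, and update_posterior is a hypothetical name):

    import numpy as np

    # Hypothetical sketch, NOT CODA's actual update rule: a learning rate
    # scaling a multiplicative-weights step over candidate models.
    def update_posterior(posterior, losses, learning_rate=0.05):
        # Models that were wrong on the newly labeled image (higher loss)
        # get down-weighted; learning_rate controls how aggressively.
        posterior = posterior * np.exp(-learning_rate * losses)
        return posterior / posterior.sum()  # keep it a probability distribution

    # Toy usage: three candidate models; the second mislabels the new image.
    posterior = np.array([1 / 3, 1 / 3, 1 / 3])
    losses = np.array([0.0, 1.0, 0.0])  # per-model 0/1 loss on one labeled image
    posterior = update_posterior(posterior, losses, learning_rate=0.05)

With a higher learning rate each labeled image moves this distribution further, which is presumably the "more fun" behavior the inline comment refers to.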
@@ -124,7 +126,7 @@ def get_model_predictions(chosen_idx):
         confidence = model_scores[predicted_class_idx]

         model_info = MODEL_INFO[model_idx]
-        predictions_list.append(f"**{model_info['
+        predictions_list.append(f"**{model_info['name']}:** {predicted_class_name} *({confidence:.3f})*")

     predictions_text = "### Model Predictions\n\n" + " | ".join(predictions_list)

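For reference, the completed append line renders one Markdown entry per model, and the joined predictions_text separates entries with " | ". A quick illustration with made-up values (BioCLIP predicting "lion" at confidence 0.973 is hypothetical):

    # Made-up values, only to show what the new f-string produces.
    model_info = {"name": "BioCLIP"}
    predicted_class_name = "lion"
    confidence = 0.973
    entry = f"**{model_info['name']}:** {predicted_class_name} *({confidence:.3f})*"
    assert entry == "**BioCLIP:** lion *(0.973)*"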
@@ -596,6 +598,7 @@ with gr.Blocks(title="CODA: Wildlife Photo Classification Challenge",
     so that you will be equipped to provide ground truth labels. Then, watch as CODA narrows down the best model over time
     as you provide labels for the query images. You will see that with your input CODA is able to identify the best model candidate
     with as few as ten (correctly) labeled images.
+
     """)

     # Species guide content (initially hidden)
@@ -806,7 +809,8 @@ with gr.Blocks(title="CODA: Wildlife Photo Classification Challenge",

     # Create oracle and CODA selector for this user
     oracle = Oracle(dataset, loss_fn=loss_fn)
-    coda_selector = CODA(dataset
+    coda_selector = CODA(dataset,
+                         learning_rate=DEMO_LEARNING_RATE)

     image, status, predictions = get_next_coda_image()
     prob_plot = create_probability_chart()
@@ -844,7 +848,8 @@ with gr.Blocks(title="CODA: Wildlife Photo Classification Challenge",

     # Create oracle and CODA selector for this user
     oracle = Oracle(dataset, loss_fn=loss_fn)
-    coda_selector = CODA(dataset
+    coda_selector = CODA(dataset,
+                         learning_rate=DEMO_LEARNING_RATE)

     # Reset all displays
     prob_plot = create_probability_chart()
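The last two hunks apply the same two-line change, so the start handler and the reset handler construct their per-user Oracle/CODA pair identically. If this duplication grows, a small helper could keep the two call sites in sync (a hypothetical refactor, not part of this commit; Oracle, CODA, and DEMO_LEARNING_RATE are the names from app.py):

    def make_user_session(dataset, loss_fn):
        # Hypothetical helper; Oracle, CODA, and DEMO_LEARNING_RATE are
        # defined elsewhere in app.py.
        oracle = Oracle(dataset, loss_fn=loss_fn)
        coda_selector = CODA(dataset,
                             learning_rate=DEMO_LEARNING_RATE)
        return oracle, coda_selector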
|