Remove some deprecated input parameters

Started thinking about evaluation
parent 3b477d3f
import json
from pathlib import Path
def score_result_file(result_file, gt_file):
    """Score one retrieval result file against its ground-truth file.

    Loads the query result JSON (expects keys ``"query"`` and ``"results"``,
    where each result entry is ``[image_id, ransac_match_count,
    feature_match_count]``) and the ground-truth JSON (expects key ``"gt"``,
    a list of relevant image ids).  For each of the two rankings — by
    RANSAC-verified matches and by raw feature matches — it builds a boolean
    hit list (True where the ranked image is in the ground truth), prints
    the intermediate data for inspection, and returns both hit lists.

    :param result_file: path to the query result JSON file
    :param gt_file: path to the ground-truth JSON file
    :return: tuple ``(ransac_hits, feature_hits)`` of boolean lists, each
        ordered by the corresponding ranking (best match first)
    """
    result_file = Path(result_file)
    gt_file = Path(gt_file)
    with result_file.open("r", encoding="utf-8") as result_json:
        result = json.load(result_json)
    matches = result["results"]
    with gt_file.open("r", encoding="utf-8") as gt_json:
        gt_data = json.load(gt_json)
    gt = gt_data["gt"]
    # set lookup: O(1) membership test per ranked match instead of O(n)
    gt_lookup = set(gt)
    # rank by RANSAC-verified match count (descending) and mark ground-truth hits
    matches.sort(key=lambda match: match[1], reverse=True)
    ransac_hits = [match[0] in gt_lookup for match in matches]
    # rank by raw feature match count (descending) and mark ground-truth hits
    matches.sort(key=lambda match: match[2], reverse=True)
    feature_hits = [match[0] in gt_lookup for match in matches]
    print(ransac_hits)
    print(feature_hits)
    print(matches)
    print(gt)
    return ransac_hits, feature_hits
# Spot-check two Oxford queries from the 30-epoch run against the new ground truth.
# NOTE(review): the relative paths assume a specific working directory relative to
# the Experiments/ and Datasets/ trees — confirm before running from elsewhere.
score_result_file("../Experiments/30_epoch_run/retrieval/query_results/bodleian_000107", "../../Datasets/Oxford_new_gt/bodleian_000107_gt.json")
score_result_file("../Experiments/30_epoch_run/retrieval/query_results/radcliffe_camera_000519", "../../Datasets/Oxford_new_gt/radcliffe_camera_000519_gt.json")
\ No newline at end of file
......@@ -27,7 +27,7 @@ class ExperimentManager:
def __init__(self, experiment_name, stages, dataset, validation_split=0.2, batch_size=8, num_workers=4, epochs=30,
learning_rate=0.008, learning_rate_gamma=0.5, learning_rate_step_size=10, weight_decay=0.0001,
load_from=None, target_layer="layer3", use_l2_normalization=True, use_retrieval_normalization=True,
pca_dataset=None, pca_load=None, pca_log=False, visualize_top_matches= False):
pca_log=False, visualize_top_matches=False):
print(f"Current working directory is {Path.cwd()}")
print(f"Running on CUDA:{torch.cuda.is_available()}")
print("Preparing experiment:")
......@@ -109,23 +109,8 @@ class ExperimentManager:
required_type=str,
required_in_stages={"finetuning", "keypoints", "retrieval"}
)
# TODO clean this up we are not doing different pca modes
self.pca_dataset = pca_dataset
self.pca_load = pca_load
self.pca_log = pca_log
self.visualize_top_matches = visualize_top_matches
if "retrieval" in self.stages:
if self.pca_dataset is not None:
self.pca_dataset = Path(pca_dataset)
assert self.pca_dataset.is_dir(), f"Dataset for pca calculation at {self.pca_dataset} not found!"
print(f"PCA is calculated on dedicated dataset at {self.pca_dataset}")
assert self.pca_load is None, "pca_dataset is set so pca_load should be None!"
elif pca_load is not None:
self.pca_load = Path(pca_load)
assert self.pca_load.is_file(), f"Did not find pca file at {self.pca_load}!"
print(f"Using pre-calculated pca matrix: {self.pca_load}")
else:
print(f"PCA is calculated on index part of retrieval dataset at {self.dataset['retrieval']}")
# convert dataset path strings to proper paths
self.dataset = {stage: Path(self.dataset[stage])for stage in self.dataset.keys()}
# ensure dataset paths exist
......@@ -147,8 +132,7 @@ class ExperimentManager:
if "keypoints" in self.stages:
self.train_model("keypoints")
if "retrieval" in self.stages:
#self.perform_retrieval()
self.perform_retrieval_2()
self.perform_retrieval()
def get_load_and_save_paths(self, load_from):
"""
......@@ -347,7 +331,7 @@ class ExperimentManager:
json.dump(log, json_file, ensure_ascii=False)
print(f"Completed {stage} training!")
def perform_retrieval_2(self):
def perform_retrieval(self):
# load model in retrieval mode
model = Delf(None, "retrieval", self.load_paths["retrieval"], target_layer=self.target_layer,
use_l2_normalization=self.use_l2_normalization).to(self.device)
......@@ -426,7 +410,7 @@ class ExperimentManager:
log_file = result_path.joinpath(query_filename)
with log_file.open('w', encoding='utf-8') as json_log:
json.dump(log_data, json_log, ensure_ascii=False)
"""
def perform_retrieval(self):
# load model in retrieval mode
model = Delf(None, "retrieval", self.load_paths["retrieval"], target_layer=self.target_layer,
......@@ -550,6 +534,8 @@ class ExperimentManager:
print(sum(matching_results[i][0]))
visualize_match(matching_results[i][3],path[0],matching_results[i][1], matching_results[i][2], matching_results[i][0])
"""
def check_experiment_wide_parameter(parameter, parameter_name, required_type, allowed_values=None):
if isinstance(parameter, dict):
......@@ -572,19 +558,12 @@ def check_experiment_wide_parameter(parameter, parameter_name, required_type, al
def fire_experiment(experiment_name, stages, dataset, validation_split=0.2, batch_size=8, num_workers=4, epochs=30,
learning_rate=0.008, learning_rate_gamma=0.5, learning_rate_step_size=10, weight_decay=0.0001,
load_from=None, target_layer="layer3", use_l2_normalization=True, use_retrieval_normalization=True,
pca_dataset=None, pca_load=None, pca_log=False, visualize_top_matches= False):
pca_log=False, visualize_top_matches=False):
ExperimentManager(experiment_name, stages, dataset, validation_split, batch_size, num_workers, epochs,
learning_rate, learning_rate_gamma, learning_rate_step_size, weight_decay, load_from,
target_layer, use_l2_normalization, use_retrieval_normalization, pca_dataset, pca_load, pca_log,
target_layer, use_l2_normalization, use_retrieval_normalization, pca_log,
visualize_top_matches)
#torch.backends.cudnn.benchmark = True
#exp = ExperimentManager("30_epoch_run", {"finetuning","keypoints","retrieval"}, {"finetuning": "../Datasets/Landmarks", "keypoints": "../../Datasets/Landmarks", "retrieval": "../../Datasets/Oxford"}, epochs=30)
#exp = ExperimentManager("30_epoch_run", {"retrieval"}, {"retrieval": "../Datasets/Oxford"}, load_from={"retrieval":"Experiments/30_epoch_run/keypoints/e3f41a23_d0f3a26d.pth"})
#exp = ExperimentManager("layer4", {"retrieval"}, {"retrieval": "../Datasets/Oxford"}, epochs=1, target_layer='layer4', load_from={"retrieval": "Experiments/layer4/keypoints/90a267db_05ef12cd.pth"})
#exp = ExperimentManager("all_stages", {"keypoints","retrieval"}, {"keypoints":"../Datasets/Landmarks", "retrieval": "../Datasets/Oxford"}, epochs=1, load_from={"keypoints":"Experiments/all_stages/keypoints/c48957fe_6d39b7dc.pth"})
#exp = ExperimentManager("variable target layer", {"keypoints"}, "../Datasets/Landmarks", epochs=1, load_from={"keypoints":"Experiments/variable target layer/finetuning/5db43e8d.pth"})
if __name__ == '__main__':
fire.Fire(fire_experiment)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment