- added L2 normalization after PCA; constrains pairwise feature distances to the range [0, 2]
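
Why [0, 2]: for unit vectors a and b, ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a·b = 2 - 2 cos(theta), which lies in [0, 4], so the Euclidean distance itself lies in [0, 2]. This is also why the KD-tree distance threshold below drops from 4.5 to 0.8; against normalized descriptors the old value would accept every match.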

Todos:
- process output information
parent 1a6b86a5
@@ -2,6 +2,7 @@ from Training.delf import Delf
 from Training.dataloader import get_data_loaders, get_path_data_loader
 from pathlib import Path
 from Training.utils import score_match, visualize_match, calculate_pca, format_time
+from sklearn.preprocessing import normalize
 import torch
 import numpy as np
 import uuid
@@ -24,8 +25,8 @@ class ExperimentManager:
     """
     def __init__(self, experiment_name, stages, dataset, validation_split=0.2, batch_size=8, num_workers=4, epochs=30,
                  learning_rate=0.008, learning_rate_gamma=0.5, learning_rate_step_size=10, weight_decay=0.0001,
-                 load_from=None, target_layer="layer3", use_l2_normalization=True, pca_dataset=None, pca_load=None,
-                 pca_log=False):
+                 load_from=None, target_layer="layer3", use_l2_normalization=True, use_retrieval_normalization=True,
+                 pca_dataset=None, pca_load=None, pca_log=False):
         print(f"Current working directory is {Path.cwd()}")
         print(f"Running on CUDA:{torch.cuda.is_available()}")
         print("Preparing experiment:")
@@ -99,6 +100,9 @@ class ExperimentManager:
         self.target_layer = check_experiment_wide_parameter(target_layer, "target_layer", str, ["layer3", "layer4"])
         self.use_l2_normalization = check_experiment_wide_parameter(use_l2_normalization, "use_l2_normalization",
                                                                     bool, [True, False])
+        self.use_retrieval_normalization = check_experiment_wide_parameter(use_retrieval_normalization,
+                                                                           "use_retrieval_normalization", bool,
+                                                                           [True, False])
         self.dataset = self.check_and_parse_parameter(parameter=dataset,
                                                       parameter_name="dataset",
                                                       required_type=str,
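
The body of check_experiment_wide_parameter is outside this diff. Judging only from the call sites above (a value, its name, a required type, and a list of allowed values), a minimal validator could look like the sketch below; the parameter name allowed_values is completed from the truncated hunk header further down, and the actual implementation in the repo may differ.

def check_experiment_wide_parameter(parameter, parameter_name, required_type, allowed_values):
    # Hypothetical sketch, not the repo's code: type-check, then membership-check.
    if not isinstance(parameter, required_type):
        raise TypeError(f"{parameter_name} must be of type {required_type.__name__}")
    if allowed_values is not None and parameter not in allowed_values:
        raise ValueError(f"{parameter_name} must be one of {allowed_values}, got {parameter!r}")
    return parameter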
@@ -405,6 +409,8 @@ class ExperimentManager:
             if pca:
                 # if we already have pca calculated we can apply it now and save space
                 features = pca.transform(features.numpy())
+                if self.use_retrieval_normalization:
+                    features = normalize(features, norm='l2', axis=1)
             # create data entry for the image
             index_extraction_list.append((path[0], label, features, rf_centers.numpy()))
             if use_index_for_pca:
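
The added step is scikit-learn's row-wise L2 normalization applied to the PCA-reduced descriptors of one image. A self-contained sketch with toy shapes (the 40 mirrors PCA_COMPONENTS defined in Training.utils below; the real features come from the model, not a random generator):

import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import normalize

rng = np.random.default_rng(0)
features = rng.normal(size=(200, 1024)).astype(np.float32)  # toy local descriptors
pca = PCA(n_components=40).fit(features)                    # stand-in for the saved .pca

reduced = pca.transform(features)                # shape (200, 40)
reduced = normalize(reduced, norm='l2', axis=1)  # every row now has unit L2 norm

assert np.allclose(np.linalg.norm(reduced, axis=1), 1.0, atol=1e-5)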
@@ -418,7 +424,13 @@ class ExperimentManager:
             # calculate pca and apply it to already extracted features if we have not done so already
             pca = calculate_pca(accumulated_features, stage_path.joinpath(
                 f"{self.load_paths['retrieval'].stem}_{self.dataset['retrieval'].stem}.pca"), self.pca_log)
-            index_extraction_list = [(entry[0], entry[1], pca.transform(entry[2].numpy()), entry[3]) for entry in index_extraction_list]
+            if self.use_retrieval_normalization:
+                index_extraction_list = [
+                    (entry[0], entry[1], normalize(pca.transform(entry[2].numpy()), norm='l2', axis=1), entry[3])
+                    for entry in index_extraction_list]
+            else:
+                index_extraction_list = [(entry[0], entry[1], pca.transform(entry[2].numpy()), entry[3]) for entry
+                                         in index_extraction_list]

         query_dataset = Path(query_dataset)
         query_data_loader = get_path_data_loader(query_dataset, num_workers=self.num_workers["retrieval"])
@@ -433,6 +445,8 @@ class ExperimentManager:
                 features = torch.nn.functional.normalize(features, p=2, dim=1)
                 # apply pca
                 features = pca.transform(features.numpy())
+                if self.use_retrieval_normalization:
+                    features = normalize(features, norm='l2', axis=1)
                 # TODO DELF wants to normalize again after pca, other impl says no, same for index features
                 rf_centers = rf_centers.numpy()
                 now = time.time()
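
On the TODO above: the DELF paper (Noh et al., ICCV 2017) L2-normalizes the descriptors, reduces them to 40 dimensions with PCA, and then L2-normalizes again, so use_retrieval_normalization=True follows the paper, while implementations that skip the second normalization diverge from it. Either way, index and query features must be transformed identically, which this commit ensures by gating both sides on the same flag.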
@@ -473,11 +487,11 @@ def check_experiment_wide_parameter(parameter, parameter_name, required_type, al

 def fire_experiment(experiment_name, stages, dataset, validation_split=0.2, batch_size=8, num_workers=4, epochs=30,
                     learning_rate=0.008, learning_rate_gamma=0.5, learning_rate_step_size=10, weight_decay=0.0001,
-                    load_from=None, target_layer="layer3", use_l2_normalization=True, pca_dataset=None, pca_load=None,
-                    pca_log=False):
+                    load_from=None, target_layer="layer3", use_l2_normalization=True, use_retrieval_normalization=True,
+                    pca_dataset=None, pca_load=None, pca_log=False):
     ExperimentManager(experiment_name, stages, dataset, validation_split, batch_size, num_workers, epochs,
                       learning_rate, learning_rate_gamma, learning_rate_step_size, weight_decay, load_from,
-                      target_layer, use_l2_normalization, pca_dataset, pca_load, pca_log)
+                      target_layer, use_l2_normalization, use_retrieval_normalization, pca_dataset, pca_load, pca_log)

 #torch.backends.cudnn.benchmark = True
 #exp = ExperimentManager("30_epoch_run", {"finetuning","keypoints","retrieval"}, {"finetuning": "../Datasets/Landmarks", "keypoints": "../../Datasets/Landmarks", "retrieval": "../../Datasets/Oxford"}, epochs=30)
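
Following the commented-out example above, a hypothetical invocation exercising the new flag (experiment name and dataset paths are placeholders):

fire_experiment("30_epoch_run_retrieval_norm",
                {"finetuning", "keypoints", "retrieval"},
                {"finetuning": "../Datasets/Landmarks",
                 "keypoints": "../../Datasets/Landmarks",
                 "retrieval": "../../Datasets/Oxford"},
                epochs=30,
                use_retrieval_normalization=True)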
@@ -16,10 +16,10 @@ RF_VALUES = {"layer3": (267, 16, 133), "layer4": (427, 32, 213)}
 PCA_COMPONENTS = 40
-KD_TREE_DISTANCE_THRESHOLD = 4.5  # has to be evaluated
+KD_TREE_DISTANCE_THRESHOLD = 0.8  # 4.5 # has to be evaluated
 RANSAC_MIN_SAMPLES = 3
 RANSAC_NUM_TRAILS = 1000
-RANSAC_RESIDUAL_THRESHOLD = 20
+RANSAC_RESIDUAL_THRESHOLD = 12.5  # 20


 def score_match(index_features, query_features, index_locations, query_locations):
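
The nearest-neighbor matching that consumes KD_TREE_DISTANCE_THRESHOLD is not visible in this diff. Assuming a scipy cKDTree over the index descriptors (the reference DELF example uses the same structure, and its distance threshold for L2-normalized, PCA-reduced descriptors is likewise 0.8), the thresholded query might look like this sketch:

import numpy as np
from scipy.spatial import cKDTree

KD_TREE_DISTANCE_THRESHOLD = 0.8

def match_descriptors(index_features, query_features):
    # Build a KD-tree over the (L2-normalized, PCA-reduced) index descriptors.
    tree = cKDTree(index_features)
    # For each query descriptor, find the nearest index descriptor within the threshold.
    distances, indices = tree.query(query_features, k=1,
                                    distance_upper_bound=KD_TREE_DISTANCE_THRESHOLD)
    # Unmatched rows come back with distance == inf and index == len(index_features).
    matched = np.isfinite(distances)
    return np.nonzero(matched)[0], indices[matched]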
@@ -40,7 +40,7 @@ def score_match(index_features, query_features, index_locations, query_locations):
     ])
     #print(cleaned_index_locations)
     #print(cleaned_query_locations)
-    #print(cleaned_query_locations.shape)
+    print(cleaned_query_locations.size)
     if cleaned_query_locations.shape[0] <= RANSAC_MIN_SAMPLES:
         return [False], None, None
     # Perform geometric verification using RANSAC.
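
The verification step presumably fits an affine model over the matched receptive-field centers; a sketch with scikit-image using the constants above (whether the repo actually uses AffineTransform is not visible in this diff):

import numpy as np
from skimage.measure import ransac
from skimage.transform import AffineTransform

RANSAC_MIN_SAMPLES = 3
RANSAC_NUM_TRIALS = 1000          # spelled RANSAC_NUM_TRAILS in the repo
RANSAC_RESIDUAL_THRESHOLD = 12.5

def verify(cleaned_query_locations, cleaned_index_locations):
    # Fit an affine transform between matched keypoint locations; the
    # surviving inliers count as geometrically verified matches.
    model, inliers = ransac((cleaned_query_locations, cleaned_index_locations),
                            AffineTransform,
                            min_samples=RANSAC_MIN_SAMPLES,
                            residual_threshold=RANSAC_RESIDUAL_THRESHOLD,
                            max_trials=RANSAC_NUM_TRIALS)
    score = 0 if inliers is None else int(inliers.sum())
    return score, model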