oxford_sorter.py is experimental. Mostly used it to get the index set but have...

oxford_sorter.py is experimental. Mostly used it to get the index set but have to understand the dataset better before building it.
- removed extract_features function from util -> the model can now return all features per image; the experiment manager will run retrieval / data loading
- added custom dataset to dataloader.py that also returns filepaths
- perform retrieval so far extracts index features and performs pca

note: PCA in the test run only explains ~40% of the variance. Is that acceptable, or do we need more components / more data?

TODO: extract features should return rf centers instead of boxes
write matching function
incorporate it into experiment manager
parent d6031024
from pathlib import Path
# Ground-truth list files of the Oxford buildings dataset (query/good/ok/junk .txt lists)
GT_PATH = "../../Oxford/gt_files"
# Folder holding the raw Oxford building images — presumably Oxford5k; confirm
IMG_PATH = "../../Oxford/oxbuild_images"
# Base folder under which the sorted index/query subfolders are created
OUT_BASE_PATH = "../../Oxford"
def move_file(f_name, base_path, src_base=None):
    """Move one image file into the given destination folder.

    Args:
        f_name: image file name, e.g. "all_souls_000001.jpg".
        base_path: destination directory (str or Path).
        src_base: source directory; defaults to the module-level IMG_PATH.
            Added as a backward-compatible parameter so the source folder
            is no longer hard-coded.

    Missing source files are reported and skipped instead of raising.
    """
    source_dir = Path(src_base) if src_base is not None else Path(IMG_PATH)
    load_path = source_dir / f_name
    save_path = Path(base_path) / f_name
    if load_path.is_file():
        # fix: actually move the file — the original only printed "moved"
        load_path.rename(save_path)
        print(f"moved {load_path} to {save_path}")
    else:
        # fix: the "not found" message must only fire when the file is missing
        # (originally it printed unconditionally)
        print(f"{load_path} not found. Skipping file!")
def build_index_set():
    """Sort the Oxford ground-truth lists into an index folder tree.

    Each gt file is named like "all_souls_1_good.txt": the last underscore
    part is the grade (good/ok/junk/query), everything before the trailing
    two parts is the category. Non-query lists contain one image name per
    line; query files name their image in the first token of the first
    line, prefixed with "oxc1_" (hence the ``[5:]`` slice).

    NOTE(review): reconstructed from collapsed diff output — the original
    span contained both the old (grade-named) and new ("index") output
    path, and the mkdir/else lines were missing. Verify against the repo.
    """
    out_base_path = Path(OUT_BASE_PATH)
    for file in Path(GT_PATH).iterdir():
        file_split = file.stem.split("_")
        image_grade = file_split[-1]
        image_category = "_".join(file_split[:-2])
        print(f"Contains image_grad {image_grade} of category {image_category}")
        # everything goes below "index/<category>" (the newer diff variant)
        out_category_path = out_base_path.joinpath(Path("index"), Path(image_category))
        if not out_category_path.is_dir():
            # fix: create the destination tree before moving files into it
            out_category_path.mkdir(parents=True)
        with file.open('r') as gt_file:
            if image_grade != "query":
                # one image name per line; turn the newline into the extension
                filenames = [line.replace("\n", ".jpg") for line in gt_file.readlines()]
                for filename in filenames:
                    move_file(filename, out_category_path)
            else:
                # fix: query handling must be the else branch, otherwise it also
                # runs for good/ok/junk lists
                filename = f"{gt_file.readline().split(' ')[0][5:]}.jpg"
                move_file(filename, out_category_path)
def sort_query_based_on_jpg_name():
out_base_path = Path(OUT_BASE_PATH)
for file in Path(IMG_PATH).iterdir():
category_parts = file.stem.split("_")[:-1]
category = "_".join(category_parts)
out_dir = out_base_path.joinpath(Path(category))
if not out_dir.is_dir():
......@@ -31,6 +31,18 @@ def get_data_loaders(dataset_path, validation_split, batch_size, num_workers, ce
return training_loader, validation_loader
class ImageFolderWithPaths(torchvision.datasets.ImageFolder):
    """ImageFolder variant that also returns each sample's file path.

    Useful for retrieval, where extracted features must stay associated
    with the image file they came from.
    """

    def __getitem__(self, index):
        # (image, label) tuple from the stock ImageFolder implementation
        default_item = super().__getitem__(index)
        # self.imgs stores (filepath, class_index) pairs in dataset order
        filepath = self.imgs[index][0]
        return default_item + (filepath,)
def get_path_data_loader(dataset_path, batch_size=1, num_workers=4):
    """Build a deterministic (non-shuffling) loader that yields file paths.

    Args:
        dataset_path: root folder in torchvision ImageFolder layout.
        batch_size: samples per batch (default 1 — retrieval works per image).
        num_workers: worker processes for the DataLoader.

    Returns:
        A DataLoader yielding (image_tensor, label, filepath) batches.
    """
    path_dataset = ImageFolderWithPaths(
        root=dataset_path,
        transform=transforms.ToTensor(),
    )
    loader = torch.utils.data.DataLoader(
        dataset=path_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=False,
    )
    return loader
from pathlib import Path
import time
......@@ -48,4 +60,14 @@ for i, batch in enumerate(tl, 0):
inputs, labels = batch[0].to(device), batch[1].to(device)
from pathlib import Path
loader = get_path_data_loader(Path("../../Datasets/Landmarks"))
norm_loader = torchvision.datasets.ImageFolder(Path("../../Datasets/Landmarks"), transform=transforms.ToTensor())
for input, label, path in loader:
\ No newline at end of file
......@@ -4,9 +4,11 @@ import torch
import torchvision
import copy
from Training.layers import Flatten, SpatialAttention2d, WeightedSum2d, __gamma_rescale__
from Training.utils import get_receptive_boxes, get_top_k_index
SCALES = [2, 1.4142, 1, 0.7071, 0.5, 0.3536, 0.25]
RF_VALUES = {"layer3": (267, 16, 133), "layer4": (427, 32, 213)}
class Delf(torch.nn.Module):
......@@ -158,6 +160,44 @@ class Delf(torch.nn.Module):
attention_score = self.module_dict['attention'](feature)
return feature, attention_score
def extract_features(self, image_input):
    """Extract attention-weighted local DELF features over all SCALES.

    Runs ``single_scale_retrieval`` once per scale, flattens the spatial
    dimensions, accumulates features / attention scores / receptive-field
    boxes across scales, then keeps the top-k boxes after NMS.

    Args:
        image_input: image tensor on the model's device
            (assumed 1 x C x H x W — TODO confirm with caller).

    Returns:
        (features, boxes): CPU tensors holding the k best local
        descriptors and their receptive boxes (x1, y1, x2, y2).
    """
    all_scale_features = None
    all_scale_scores = None
    all_scale_boxes = None
    for scale in SCALES:
        # get features and attention scores from the model at this scale
        features, attention_scores = self.single_scale_retrieval(scale, image_input)
        # features are no longer needed on the GPU — detach/CPU to save memory
        features = features.detach().cpu()
        # receptive boxes mapped back to original-image coordinates
        receptive_boxes = get_receptive_boxes(features.size(2), features.size(3), scale)
        # flatten h,w so values from different scales can be concatenated;
        # spatial information can still be inferred from the receptive boxes
        features = features.view(features.size(1), -1).t()
        attention_scores = attention_scores.view(-1)
        receptive_boxes = receptive_boxes.view(-1, 4)
        # accumulate the values of all scales
        # fix: the original fell through to torch.cat even on the first scale,
        # which would have concatenated the first scale's values with themselves
        if all_scale_features is None:
            all_scale_features = features
        else:
            all_scale_features = torch.cat((all_scale_features, features), dim=0)
        if all_scale_scores is None:
            all_scale_scores = attention_scores
        else:
            all_scale_scores = torch.cat((all_scale_scores, attention_scores), dim=0)
        if all_scale_boxes is None:
            all_scale_boxes = receptive_boxes
        else:
            all_scale_boxes = torch.cat((all_scale_boxes, receptive_boxes), dim=0)
    # NMS on scores/boxes, then keep the 1000 best-scoring boxes
    # NOTE(review): the .cuda() call assumes a GPU is available — confirm
    keep = get_top_k_index(all_scale_scores, all_scale_boxes.cuda(), iou_threshold=0.8, k=1000)
    # index-select the surviving boxes and their features
    all_scale_boxes = all_scale_boxes[keep]
    all_scale_features = all_scale_features[keep]
    return all_scale_features, all_scale_boxes
def __load_weights_from__(module_dict, load_dict, module_names):
for module_name in module_names:
......@@ -177,6 +217,9 @@ def __freeze_modules__(module_dict, modules_to_freeze=[]):
#mydelf = Delf(10,"finetuning")
#testin = torch.rand((8,3,224,224))
from Training.delf import Delf
from Training.dataloader import get_data_loaders
from Training.dataloader import get_data_loaders, get_path_data_loader
from sklearn.decomposition import PCA
from pathlib import Path
import torch
import numpy as np
import uuid
import time
import json
import pickle
SCALES = [2, 1.4142, 1, 0.7071, 0.5, 0.3536, 0.25]
class ExperimentManager:
......@@ -307,7 +308,7 @@ class ExperimentManager:
print(f"new best epoch! saving model to {model_path}")
# create checkpoint of model with lowest validation loss so far
torch.save(model.get_state_for_saving(), model_path)
# TODO ensure this is working correctly by printing the learning rate in the logger
# learning rate scheduler is slightly buggy in this version of torch but should not matter for training.
log_path = Path.joinpath(model_path.parent, Path(f"{model_path.stem}_log.json"))
log = {"Experiment": self.experiment_path.stem.__str__(),
......@@ -325,6 +326,94 @@ class ExperimentManager:
json.dump(log, json_file, ensure_ascii=False)
print(f"Completed {stage} training!")
def perform_retrieval(self, model_path, index_dataset, query_dataset, pca_load=None, pca_dataset=None, pca_save=None):
    """Extract index features for retrieval, obtaining a PCA one of three ways.

    PCA source priority: a pickled PCA (``pca_load``), else one fitted on a
    dedicated dataset (``pca_dataset``), else one fitted on the index set
    itself. Builds ``index_extraction_list`` of
    (image_stem, label, features, boxes) entries.

    NOTE(review): this span is collapsed diff output. In three places an
    ``if X is None:`` assignment falls straight through to an unconditional
    ``torch.cat`` (first-iteration duplication), and the fallback
    "use the index set" lines below look like they lost an ``else:``.
    ``query_dataset`` is accepted but never used yet (matching the TODOs).
    """
    # load model in retrieval mode
    model = Delf(None, "retrieval", model_path).to(self.device)
    # used to check if pca has been provided or is calculated on dedicated data
    use_index_for_pca = False
    pca = None
    with torch.no_grad():
        if pca_load:
            # load pca from pickle file
            pca_load = Path(pca_load)
            pca = pickle.load(pca_load.open("rb"))
            print(f"loaded pca matrix from {pca_load}")
        elif pca_dataset:
            print(f"calculating pca matrix on {pca_dataset}")
            accumulated_features = None
            pca_dataset = Path(pca_dataset)
            pca_data_loader = get_path_data_loader(pca_dataset)
            # iterate over dataset
            for ind, batch in enumerate(pca_data_loader):
                # get img tensor
                image_input = batch[0].to(self.device)
                # get features, ignore boxes
                features, _ = model.extract_features(image_input)
                # accumulate all features in one container
                # NOTE(review): an else: before the cat is likely missing here
                if accumulated_features is None:
                    accumulated_features = features
                accumulated_features = torch.cat((accumulated_features, features), dim=0)
                print(f"[{ind+1}/{len(pca_data_loader)}] gathering features for pca:"
                      f" {accumulated_features.size()[0]} features gathered")
            # Delf recommends normalizing features before pca
            accumulated_features = torch.nn.functional.normalize(accumulated_features, p=2, dim=1)
            # sckit pca needs numpy array as input
            accumulated_features = accumulated_features.numpy()
            # calculate pca matrix and save to file
            # NOTE(review): Path(pca_save) raises if pca_save is None — confirm callers
            pca = calculate_pca(accumulated_features, Path(pca_save))
        # if we do not have a pca matrix yet, use the index set
        # NOTE(review): these two lines presumably belong under an else: branch
        print(f"calculating pca matrix on {index_dataset}")
        use_index_for_pca = True
        # container for all the gathered index information
        index_extraction_list = []
        if use_index_for_pca:
            # accumulator for pca
            accumulated_features = None
        # iterate over dataset
        index_dataset = Path(index_dataset)
        index_data_loader = get_path_data_loader(index_dataset)
        for ind, batch in enumerate(index_data_loader):
            # get all image info from loader
            image_input, label, path = batch
            image_input = image_input.to(self.device)
            # TODO make model return box centers instead of boxes
            # get features and rf info from model
            features, boxes = model.extract_features(image_input)
            # Delf says normalize before pca
            features = torch.nn.functional.normalize(features, p=2, dim=1)
            if pca:
                # if we already have pca calculated we can apply it now and save space
                # NOTE(review): features becomes a numpy array here, but the
                # pca-on-index path below still calls .numpy() on entries — reconcile
                features = pca.transform(features.numpy())
            # create data entry for the image
            index_extraction_list.append((Path(path[0]).stem, label, features, boxes.numpy()))
            if use_index_for_pca:
                # if we still have to calculate pca: accumulate features
                # NOTE(review): an else: before the cat is likely missing here too
                if accumulated_features is None:
                    accumulated_features = features
                accumulated_features = torch.cat((accumulated_features, features), dim=0)
            print(f"[{ind + 1}/{len(index_data_loader)}] gathering index features")
        if use_index_for_pca:
            # calculate pca and apply it to already extracted features if we have not done so already
            pca = calculate_pca(accumulated_features, Path("pca.pkl"))
            index_extraction_list = [(entry[0], entry[1], pca.transform(entry[2].numpy()), entry[3]) for entry in index_extraction_list]
def calculate_pca(data, save_path=None):
    """Fit a whitened PCA on the given features and optionally pickle it.

    Args:
        data: 2-D numpy array (n_samples x n_features), ideally l2-normalized
            beforehand (DELF convention).
        save_path: optional Path; when given, the fitted PCA is pickled there.

    Returns:
        The fitted sklearn PCA instance.
    """
    pca = PCA(n_components=PCA_COMPONENTS, whiten=True)
    # fix: the PCA was never fitted — without fit(), explained_variance_ratio_
    # raises AttributeError and the pickled estimator would be unusable
    pca.fit(data)
    print(f"calculated pca matrix. Explained variance is {sum(pca.explained_variance_ratio_):.2f}"
          f" over {pca.n_components} components")
    if save_path:
        print(f"saving pca data to {save_path}")
        pickle.dump(pca, save_path.open("wb"))
    return pca
def format_time(seconds):
minutes, seconds = divmod(seconds.__int__(), 60)
......@@ -351,6 +440,6 @@ def check_experiment_wide_parameter(parameter, parameter_name, required_type, al
torch.backends.cudnn.benchmark = True
exp = ExperimentManager("variable_target_layer", {"retrieval"}, "../../Datasets/Oxford/index", load_from={"retrieval":"../Experiments/variable target layer/keypoints/5db43e8d_dbb65c50.pth"}).perform_retrieval("../Experiments/variable target layer/keypoints/5db43e8d_dbb65c50.pth", "../../Datasets/Oxford/index", "asd", pca_dataset="../../Datasets/Oxford/index", pca_save="pca.pkl")
#exp = ExperimentManager("variable target layer", {"finetuning","keypoints"}, "../Datasets/Landmarks", epochs=1)
#exp = ExperimentManager("variable target layer", {"keypoints"}, "../Datasets/Landmarks", epochs=1, load_from={"keypoints":"Experiments/variable target layer/finetuning/5db43e8d.pth"})
import torch
import torchvision
from sklearn.decomposition import PCA
from Training.delf import Delf
#from sklearn.decomposition import PCA
#from Training.delf import Delf
#import pickle
SCALES = [2, 1.4142, 1, 0.7071, 0.5, 0.3536, 0.25]
RF_VALUES = {"layer3": (267, 16, 133), "layer4": (427, 32, 213)}
# TODO will be moved to experiment when model/ dataloaders are available
def extract_features(self, stage):
def extract_features(self, stage, pca=None):
model = Delf(10, "retrieval", "../Experiments/variable target layer/keypoints/5db43e8d_dbb65c50.pth").cuda()
all_features = None
all_boxes = None
feature_list = []
with torch.no_grad():
for image in range(2): # here is where i put the dataloader
for image in range(5): # here is where i put the dataloader
# TODO think about limiting the image size ranges. This works if all imgs are 2000X2000 but might use up
# more mem if size is variable, also much larger images need more than 8gb vram
# Could also restrict larger scales for big images
data = torch.rand((1,3,500,200)).cuda()# c x h x w
data = torch.rand((1,3,1000,1000)).cuda()# c x h x w
label = 0
filename = "blub"
all_scale_features = None
all_scale_scores = None
all_scale_boxes = None
......@@ -28,8 +33,8 @@ def extract_features(self, stage):
features = features.detach().cpu()
# calculate the receptive boxes in original scale
receptive_boxes = get_receptive_boxes(features.size(2), features.size(3), scale)
# flatten h,w dimensions so we can append values from different scales
# spatial information can be inferred from the receptive boxes
features = features.view(features.size(1), -1).t()
......@@ -54,10 +59,14 @@ def extract_features(self, stage):
# we do not need the values in cuda anymore
all_scale_boxes = all_scale_boxes[keep].detach().cpu()
# could probably just ignore scores at this point
all_scale_scores = all_scale_scores[keep].detach().cpu()
# all_scale_scores = all_scale_scores[keep].detach().cpu()
all_scale_features = all_scale_features[keep]
all_scale_features, all_scale_boxes = model.extract_features(data)
# for pca we can just stitch all features together for retrieval we need some kind of reference
# to which image they belong
extracted_data = (all_scale_features, all_scale_boxes, label, filename)
if all_features is None:
all_features = all_scale_features
......@@ -66,20 +75,26 @@ def extract_features(self, stage):
all_boxes = all_scale_boxes
all_boxes = torch.cat((all_boxes, all_scale_boxes), dim=0)
# TODO check if we should normalize features before pca, other than whiten
# TODO tf repo says l2 normalize features before
# convert features to numpy for pca
all_features = all_features.numpy()
# fit pca
delf_pca = PCA(n_components=40, whiten=True)
pickle.dump(delf_pca, open("pca.pkl", "wb"))
print(f"pca componenets {delf_pca.components_} explained var ratio {delf_pca.explained_variance_ratio_}")
# test application of pca
test_sample = torch.rand(size=(1,1024)).numpy()
l_pca = pickle.load(open("pca.pkl", "rb"))
# TODO tf repo says l2 normalize features after
def get_receptive_boxes(height, width, scale, target_layer="layer3"):
calculates the receptive boxes for a feature map, based on its height and width and the RF parameters of the
......@@ -122,5 +137,3 @@ def get_top_k_index(scores, boxes, iou_threshold, k):
keep = keep[:k]
return keep
Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment