Reordered functions in cli module
Signed-off-by: Jim Martens <github@2martens.de>
@@ -40,6 +40,21 @@ def config(args: argparse.Namespace) -> None:
         conf.list_property_values()
 
 
+def prepare(args: argparse.Namespace) -> None:
+    import pickle
+
+    from twomartens.masterthesis import data
+
+    file_names_photos, file_names_instances, instances = data.prepare_scenenet_data(args.scenenet_path,
+                                                                                     args.protobuf_path)
+    with open(f"{args.ground_truth_path}/photo_paths.bin", "wb") as file:
+        pickle.dump(file_names_photos, file)
+    with open(f"{args.ground_truth_path}/instance_paths.bin", "wb") as file:
+        pickle.dump(file_names_instances, file)
+    with open(f"{args.ground_truth_path}/instances.bin", "wb") as file:
+        pickle.dump(instances, file)
+
+
 def train(args: argparse.Namespace) -> None:
     if args.network == "ssd" or args.network == "bayesian_ssd":
         _ssd_train(args)
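Note: the handlers added above all take a single argparse.Namespace. As a point of reference, here is a minimal, self-contained sketch of how such subcommands are typically dispatched; the parser setup, subcommand names and positional argument names below are illustrative assumptions and are not part of this commit.

# illustrative wiring only - the real parser lives elsewhere in the cli module
import argparse


def prepare(args: argparse.Namespace) -> None:
    # placeholder standing in for the real handler shown in the diff above
    print(f"would prepare SceneNet data from {args.scenenet_path}")


def train(args: argparse.Namespace) -> None:
    # placeholder standing in for the real handler shown in the diff above
    print(f"would train the {args.network} network")


def main() -> None:
    parser = argparse.ArgumentParser(prog="masterthesis")
    sub_parsers = parser.add_subparsers(dest="command", required=True)

    prepare_parser = sub_parsers.add_parser("prepare")
    prepare_parser.add_argument("scenenet_path")       # assumed argument names
    prepare_parser.add_argument("protobuf_path")
    prepare_parser.add_argument("ground_truth_path")
    prepare_parser.set_defaults(func=prepare)

    train_parser = sub_parsers.add_parser("train")
    train_parser.add_argument("network", choices=["ssd", "bayesian_ssd", "auto_encoder"])
    train_parser.set_defaults(func=train)

    args = parser.parse_args()
    args.func(args)  # each handler receives the parsed Namespace


if __name__ == "__main__":
    main()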
@@ -47,6 +62,96 @@ def train(args: argparse.Namespace) -> None:
         _auto_encoder_train(args)
 
 
+def test(args: argparse.Namespace) -> None:
+    if args.network == "ssd" or args.network == "bayesian_ssd":
+        _ssd_test(args)
+    elif args.network == "auto_encoder":
+        _auto_encoder_test(args)
+
+
+def evaluate(args: argparse.Namespace) -> None:
+    if args.network == "ssd":
+        _ssd_evaluate(args)
+    else:
+        raise NotImplementedError
+
+
+def visualise(args: argparse.Namespace) -> None:
+    import pickle
+
+    from matplotlib import pyplot
+    import numpy as np
+    from PIL import Image
+
+    from twomartens.masterthesis.ssd_keras.eval_utils import coco_utils
+
+    with open(f"{args.ground_truth_path}/photo_paths.bin", "rb") as file:
+        file_names = pickle.load(file)
+    with open(f"{args.ground_truth_path}/instances.bin", "rb") as file:
+        instances = pickle.load(file)
+
+    output_path = f"{args.output_path}/visualise/{args.trajectory}"
+    annotation_file_train = f"{args.coco_path}/annotations/instances_train2014.json"
+    cats_to_classes, _, cats_to_names, _ = coco_utils.get_coco_category_maps(annotation_file_train)
+
+    colors = pyplot.cm.hsv(np.linspace(0, 1, 81)).tolist()
+
+    i = 0
+    nr_images = len(file_names[args.trajectory])
+    nr_digits = math.ceil(math.log10(nr_images))
+    for file_name, labels in zip(file_names[args.trajectory], instances[args.trajectory]):
+        if not labels:
+            continue
+
+        # only loop through selected trajectory
+        with Image.open(file_name) as image:
+            figure = pyplot.figure(figsize=(20, 12))
+            pyplot.imshow(image)
+
+            current_axis = pyplot.gca()
+
+            for instance in labels:
+                bbox = instance['bbox']
+                # Transform the predicted bounding boxes for the 300x300 image to the original image dimensions.
+                xmin = bbox[0]
+                ymin = bbox[1]
+                xmax = bbox[2]
+                ymax = bbox[3]
+                color = colors[cats_to_classes[int(instance['coco_id'])]]
+                label = f"{cats_to_names[int(instance['coco_id'])]}: {instance['wordnet_class_name']}, " \
+                        f"{instance['wordnet_id']}"
+                current_axis.add_patch(
+                    pyplot.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, color=color, fill=False, linewidth=2))
+                current_axis.text(xmin, ymin, label, size='x-large', color='white',
+                                  bbox={'facecolor': color, 'alpha': 1.0})
+            pyplot.savefig(f"{output_path}/{str(i).zfill(nr_digits)}")
+            pyplot.close(figure)
+
+        i += 1
+
+
+def measure_mapping(args: argparse.Namespace) -> None:
+    import pickle
+
+    from twomartens.masterthesis.ssd_keras.eval_utils import coco_utils
+
+    with open(f"{args.ground_truth_path}/instances.bin", "rb") as file:
+        instances = pickle.load(file)
+
+    output_path = f"{args.output_path}/measure/{args.tarball_id}"
+    annotation_file_train = f"{args.coco_path}/annotations/instances_train2014.json"
+    cats_to_classes, _, _, _ = coco_utils.get_coco_category_maps(annotation_file_train)
+
+    for i, trajectory in enumerate(instances):
+        counts = {cat_id: 0 for cat_id in cats_to_classes.keys()}
+        for labels in trajectory:
+            for instance in labels:
+                counts[instance['coco_id']] += 1
+
+        with open(f"{output_path}/{i}.bin", "wb") as file:
+            pickle.dump(counts, file)
+
+
 def _ssd_train(args: argparse.Namespace) -> None:
     import os
     import pickle
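Note: prepare, visualise and measure_mapping communicate through the pickle files under args.ground_truth_path. A short sketch of inspecting those files follows, assuming prepare has already been run and using "ground_truth" as a stand-in directory; the per-instance keys (bbox, coco_id, wordnet_class_name, wordnet_id) are the ones used in the diff above, while the integer trajectory index is an assumption.

# hedged sketch: peek into the ground-truth pickles written by prepare
import pickle

ground_truth_path = "ground_truth"  # assumed example directory

with open(f"{ground_truth_path}/photo_paths.bin", "rb") as file:
    file_names = pickle.load(file)   # photo paths, indexed by trajectory
with open(f"{ground_truth_path}/instances.bin", "rb") as file:
    instances = pickle.load(file)    # ground-truth instances, same indexing

# grab the first non-empty frame of the first trajectory
first_labelled = next(labels for labels in instances[0] if labels)
instance = first_labelled[0]
print(instance["bbox"], instance["coco_id"],
      instance["wordnet_class_name"], instance["wordnet_id"])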
@@ -200,109 +305,6 @@ def _auto_encoder_train(args: argparse.Namespace) -> None:
                       channels=3, train_epoch=args.num_epochs, batch_size=batch_size)
 
 
-def evaluate(args: argparse.Namespace) -> None:
-    if args.network == "ssd":
-        _ssd_evaluate(args)
-    else:
-        raise NotImplementedError
-
-
-def _ssd_evaluate(args: argparse.Namespace) -> None:
-    import glob
-    import os
-    import pickle
-
-    import numpy as np
-    import tensorflow as tf
-
-    from twomartens.masterthesis import evaluate
-    from twomartens.masterthesis import ssd
-
-    tf.enable_eager_execution()
-
-    batch_size = 16
-    use_dropout = False if args.network == "ssd" else True
-    output_path = f"{args.output_path}/val/{args.network}/{args.iteration}"
-    evaluation_path = f"{args.evaluation_path}/{args.network}"
-    result_file = f"{evaluation_path}/results-{args.iteration}.bin"
-    label_file = f"{output_path}/labels.bin"
-    predictions_file = f"{output_path}/predictions.bin"
-    predictions_per_class_file = f"{output_path}/predictions_class.bin"
-    os.makedirs(evaluation_path, exist_ok=True)
-
-    # retrieve labels and un-batch them
-    files = glob.glob(f"{output_path}/*ssd_labels*")
-    labels = []
-    for filename in files:
-        with open(filename, "rb") as file:
-            # get labels per batch
-            label_dict = pickle.load(file)
-            labels.extend(label_dict['labels'])
-
-    # store labels for later use
-    with open(label_file, "wb") as file:
-        pickle.dump(labels, file)
-
-    number_gt_per_class = evaluate.get_number_gt_per_class(labels, ssd.N_CLASSES)
-
-    # retrieve predictions and un-batch them
-    files = glob.glob(f"{output_path}/*ssd_predictions*")
-    predictions = []
-    for filename in files:
-        with open(filename, "rb") as file:
-            # get predictions per batch
-            _predictions = pickle.load(file)
-            predictions.extend(_predictions)
-            del _predictions
-
-    # prepare predictions for further use
-    with open(predictions_file, "wb") as file:
-        pickle.dump(predictions, file)
-
-    predictions_per_class = evaluate.prepare_predictions(predictions, ssd.N_CLASSES)
-    del predictions
-
-    with open(predictions_per_class_file, "wb") as file:
-        pickle.dump(predictions_per_class, file)
-
-    # compute matches between predictions and ground truth
-    true_positives, false_positives, \
-        cum_true_positives, cum_false_positives, open_set_error = evaluate.match_predictions(predictions_per_class,
-                                                                                              labels,
-                                                                                              ssd.N_CLASSES)
-    del labels
-    cum_precisions, cum_recalls = evaluate.get_precision_recall(number_gt_per_class,
-                                                                cum_true_positives,
-                                                                cum_false_positives,
-                                                                ssd.N_CLASSES)
-    f1_scores = evaluate.get_f1_score(cum_precisions, cum_recalls, ssd.N_CLASSES)
-    average_precisions = evaluate.get_mean_average_precisions(cum_precisions, cum_recalls, ssd.N_CLASSES)
-    mean_average_precision = evaluate.get_mean_average_precision(average_precisions)
-
-    results = {
-        "true_positives": true_positives,
-        "false_positives": false_positives,
-        "cumulative_true_positives": cum_true_positives,
-        "cumulative_false_positives": cum_false_positives,
-        "cumulative_precisions": cum_precisions,
-        "cumulative_recalls": cum_recalls,
-        "f1_scores": f1_scores,
-        "mean_average_precisions": average_precisions,
-        "mean_average_precision": mean_average_precision,
-        "open_set_error": open_set_error
-    }
-
-    with open(result_file, "wb") as file:
-        pickle.dump(results, file)
-
-
-def test(args: argparse.Namespace) -> None:
-    if args.network == "ssd" or args.network == "bayesian_ssd":
-        _ssd_test(args)
-    elif args.network == "auto_encoder":
-        _auto_encoder_test(args)
-
-
 def _ssd_test(args: argparse.Namespace) -> None:
     import pickle
     import os
@@ -386,91 +388,90 @@ def _auto_encoder_test(args: argparse.Namespace) -> None:
                      image_size=image_size)
 
 
-def prepare(args: argparse.Namespace) -> None:
-    import pickle
-
-    from twomartens.masterthesis import data
-
-    file_names_photos, file_names_instances, instances = data.prepare_scenenet_data(args.scenenet_path,
-                                                                                     args.protobuf_path)
-    with open(f"{args.ground_truth_path}/photo_paths.bin", "wb") as file:
-        pickle.dump(file_names_photos, file)
-    with open(f"{args.ground_truth_path}/instance_paths.bin", "wb") as file:
-        pickle.dump(file_names_instances, file)
-    with open(f"{args.ground_truth_path}/instances.bin", "wb") as file:
-        pickle.dump(instances, file)
-
-
-def visualise(args: argparse.Namespace) -> None:
-    import pickle
-
-    from matplotlib import pyplot
-    import numpy as np
-    from PIL import Image
-
-    from twomartens.masterthesis.ssd_keras.eval_utils import coco_utils
-
-    with open(f"{args.ground_truth_path}/photo_paths.bin", "rb") as file:
-        file_names = pickle.load(file)
-    with open(f"{args.ground_truth_path}/instances.bin", "rb") as file:
-        instances = pickle.load(file)
-
-    output_path = f"{args.output_path}/visualise/{args.trajectory}"
-    annotation_file_train = f"{args.coco_path}/annotations/instances_train2014.json"
-    cats_to_classes, _, cats_to_names, _ = coco_utils.get_coco_category_maps(annotation_file_train)
-
-    colors = pyplot.cm.hsv(np.linspace(0, 1, 81)).tolist()
-
-    i = 0
-    nr_images = len(file_names[args.trajectory])
-    nr_digits = math.ceil(math.log10(nr_images))
-    for file_name, labels in zip(file_names[args.trajectory], instances[args.trajectory]):
-        if not labels:
-            continue
-
-        # only loop through selected trajectory
-        with Image.open(file_name) as image:
-            figure = pyplot.figure(figsize=(20, 12))
-            pyplot.imshow(image)
-
-            current_axis = pyplot.gca()
-
-            for instance in labels:
-                bbox = instance['bbox']
-                # Transform the predicted bounding boxes for the 300x300 image to the original image dimensions.
-                xmin = bbox[0]
-                ymin = bbox[1]
-                xmax = bbox[2]
-                ymax = bbox[3]
-                color = colors[cats_to_classes[int(instance['coco_id'])]]
-                label = f"{cats_to_names[int(instance['coco_id'])]}: {instance['wordnet_class_name']}, {instance['wordnet_id']}"
-                current_axis.add_patch(
-                    pyplot.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, color=color, fill=False, linewidth=2))
-                current_axis.text(xmin, ymin, label, size='x-large', color='white',
-                                  bbox={'facecolor': color, 'alpha': 1.0})
-            pyplot.savefig(f"{output_path}/{str(i).zfill(nr_digits)}")
-            pyplot.close(figure)
-
-        i += 1
-
-
-def measure_mapping(args: argparse.Namespace) -> None:
-    import pickle
-
-    from twomartens.masterthesis.ssd_keras.eval_utils import coco_utils
-
-    with open(f"{args.ground_truth_path}/instances.bin", "rb") as file:
-        instances = pickle.load(file)
-
-    output_path = f"{args.output_path}/measure/{args.tarball_id}"
-    annotation_file_train = f"{args.coco_path}/annotations/instances_train2014.json"
-    cats_to_classes, _, _, _ = coco_utils.get_coco_category_maps(annotation_file_train)
-
-    for i, trajectory in enumerate(instances):
-        counts = {cat_id: 0 for cat_id in cats_to_classes.keys()}
-        for labels in trajectory:
-            for instance in labels:
-                counts[instance['coco_id']] += 1
-
-        with open(f"{output_path}/{i}.bin", "wb") as file:
-            pickle.dump(counts, file)
+def _ssd_evaluate(args: argparse.Namespace) -> None:
+    import glob
+    import os
+    import pickle
+
+    import numpy as np
+    import tensorflow as tf
+
+    from twomartens.masterthesis import evaluate
+    from twomartens.masterthesis import ssd
+
+    tf.enable_eager_execution()
+
+    batch_size = 16
+    use_dropout = False if args.network == "ssd" else True
+    output_path = f"{args.output_path}/val/{args.network}/{args.iteration}"
+    evaluation_path = f"{args.evaluation_path}/{args.network}"
+    result_file = f"{evaluation_path}/results-{args.iteration}.bin"
+    label_file = f"{output_path}/labels.bin"
+    predictions_file = f"{output_path}/predictions.bin"
+    predictions_per_class_file = f"{output_path}/predictions_class.bin"
+    os.makedirs(evaluation_path, exist_ok=True)
+
+    # retrieve labels and un-batch them
+    files = glob.glob(f"{output_path}/*ssd_labels*")
+    labels = []
+    for filename in files:
+        with open(filename, "rb") as file:
+            # get labels per batch
+            label_dict = pickle.load(file)
+            labels.extend(label_dict['labels'])
+
+    # store labels for later use
+    with open(label_file, "wb") as file:
+        pickle.dump(labels, file)
+
+    number_gt_per_class = evaluate.get_number_gt_per_class(labels, ssd.N_CLASSES)
+
+    # retrieve predictions and un-batch them
+    files = glob.glob(f"{output_path}/*ssd_predictions*")
+    predictions = []
+    for filename in files:
+        with open(filename, "rb") as file:
+            # get predictions per batch
+            _predictions = pickle.load(file)
+            predictions.extend(_predictions)
+            del _predictions
+
+    # prepare predictions for further use
+    with open(predictions_file, "wb") as file:
+        pickle.dump(predictions, file)
+
+    predictions_per_class = evaluate.prepare_predictions(predictions, ssd.N_CLASSES)
+    del predictions
+
+    with open(predictions_per_class_file, "wb") as file:
+        pickle.dump(predictions_per_class, file)
+
+    # compute matches between predictions and ground truth
+    true_positives, false_positives, \
+        cum_true_positives, cum_false_positives, open_set_error = evaluate.match_predictions(predictions_per_class,
+                                                                                              labels,
+                                                                                              ssd.N_CLASSES)
+    del labels
+    cum_precisions, cum_recalls = evaluate.get_precision_recall(number_gt_per_class,
+                                                                cum_true_positives,
+                                                                cum_false_positives,
+                                                                ssd.N_CLASSES)
+    f1_scores = evaluate.get_f1_score(cum_precisions, cum_recalls, ssd.N_CLASSES)
+    average_precisions = evaluate.get_mean_average_precisions(cum_precisions, cum_recalls, ssd.N_CLASSES)
+    mean_average_precision = evaluate.get_mean_average_precision(average_precisions)
+
+    results = {
+        "true_positives": true_positives,
+        "false_positives": false_positives,
+        "cumulative_true_positives": cum_true_positives,
+        "cumulative_false_positives": cum_false_positives,
+        "cumulative_precisions": cum_precisions,
+        "cumulative_recalls": cum_recalls,
+        "f1_scores": f1_scores,
+        "mean_average_precisions": average_precisions,
+        "mean_average_precision": mean_average_precision,
+        "open_set_error": open_set_error
+    }
+
+    with open(result_file, "wb") as file:
+        pickle.dump(results, file)
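Note: _ssd_evaluate above pickles all of its metrics into a single results file. A minimal sketch of reading that file back; the concrete path is a placeholder for the one derived from args.evaluation_path, args.network and args.iteration, while the dictionary keys match the diff.

# hedged sketch: load the results dictionary written by _ssd_evaluate
import pickle

result_file = "evaluation/ssd/results-1.bin"  # assumed example path

with open(result_file, "rb") as file:
    results = pickle.load(file)

print("mAP:", results["mean_average_precision"])
print("open-set error:", results["open_set_error"])
print("per-class AP:", results["mean_average_precisions"])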