Changed `mode` argument to `training` argument to better reflect requirements

Signed-off-by: Jim Martens <github@2martens.de>
This commit is contained in:
2019-06-13 15:12:37 +02:00
parent c260b7d824
commit 49a320ca12
2 changed files with 5 additions and 5 deletions

View File

@@ -75,13 +75,13 @@ def _ssd_train(args: argparse.Namespace) -> None:
predictor_sizes=ssd_model.predictor_sizes, predictor_sizes=ssd_model.predictor_sizes,
batch_size=batch_size, batch_size=batch_size,
resized_shape=(image_size, image_size), resized_shape=(image_size, image_size),
mode="training") training=True)
val_generator, val_length = \ val_generator, val_length = \
data.load_scenenet_data(file_names_val, instances_val, args.coco_path, data.load_scenenet_data(file_names_val, instances_val, args.coco_path,
predictor_sizes=ssd_model.predictor_sizes, predictor_sizes=ssd_model.predictor_sizes,
batch_size=batch_size, batch_size=batch_size,
resized_shape=(image_size, image_size), resized_shape=(image_size, image_size),
mode="validation") training=False)
del file_names_train, instances_train, file_names_val, instances_val del file_names_train, instances_train, file_names_val, instances_val
nr_batches_train = int(math.ceil(train_length / float(batch_size))) nr_batches_train = int(math.ceil(train_length / float(batch_size)))

View File

@@ -237,7 +237,7 @@ def load_scenenet_data(photo_paths: Sequence[Sequence[str]],
coco_path: str, predictor_sizes: np.ndarray, coco_path: str, predictor_sizes: np.ndarray,
batch_size: int, batch_size: int,
resized_shape: Sequence[int], resized_shape: Sequence[int],
mode: str) -> Tuple[callable, int]: training: bool) -> Tuple[callable, int]:
""" """
Loads the SceneNet RGB-D data and returns a data set. Loads the SceneNet RGB-D data and returns a data set.
@@ -248,7 +248,7 @@ def load_scenenet_data(photo_paths: Sequence[Sequence[str]],
predictor_sizes: sizes of the predictor layers predictor_sizes: sizes of the predictor layers
batch_size: size of every batch batch_size: size of every batch
resized_shape: shape of input images to SSD resized_shape: shape of input images to SSD
mode: one of "validation" or "training" training: True if training data is desired
Returns: Returns:
scenenet data set generator scenenet data set generator
@@ -290,7 +290,7 @@ def load_scenenet_data(photo_paths: Sequence[Sequence[str]],
labels=final_labels labels=final_labels
) )
if mode == "training": if training:
shuffle = True shuffle = True
transformations = [data_augmentation_chain_original_ssd.SSDDataAugmentation( transformations = [data_augmentation_chain_original_ssd.SSDDataAugmentation(
img_width=resized_shape[0], img_width=resized_shape[0],