Make bottleneck even harder

Signed-off-by: Jim Martens <github@2martens.de>
This commit is contained in:
2019-04-17 14:26:30 +02:00
parent 7249d569f7
commit 532f4ac567

View File

@@ -132,11 +132,11 @@ def _val(args: argparse.Namespace) -> None:
         with use_summary_writer.as_default():
             run.run_simple(coco_data, iteration=args.iteration_trained,
                            weights_prefix=f"{args.weights_path}/category-{category_trained}",
-                           zsize=32, verbose=args.verbose, channels=3, batch_size=batch_size)
+                           zsize=16, verbose=args.verbose, channels=3, batch_size=batch_size)
     else:
         run.run_simple(coco_data, iteration=args.iteration_trained,
                        weights_prefix=f"{args.weights_path}/category-{category_trained}",
-                       zsize=32, verbose=args.verbose, channels=3, batch_size=batch_size)
+                       zsize=16, verbose=args.verbose, channels=3, batch_size=batch_size)
 def _auto_encoder_train(args: argparse.Namespace) -> None:
@@ -158,12 +158,12 @@ def _auto_encoder_train(args: argparse.Namespace) -> None:
         with train_summary_writer.as_default():
             train.train_simple(coco_data, iteration=args.iteration,
                                weights_prefix=f"{args.weights_path}/category-{category}",
-                               zsize=32, lr=0.0001, verbose=args.verbose,
+                               zsize=16, lr=0.0001, verbose=args.verbose,
                                channels=3, train_epoch=args.num_epochs, batch_size=batch_size)
     else:
         train.train_simple(coco_data, iteration=args.iteration,
                            weights_prefix=f"{args.weights_path}/category-{category}",
-                           zsize=32, lr=0.0001, verbose=args.verbose,
+                           zsize=16, lr=0.0001, verbose=args.verbose,
                            channels=3, train_epoch=args.num_epochs, batch_size=batch_size)