refactor(NeuralNetwork): remove unused multiprocessing parameter
- Removed the unused multiprocessing parameter from the NeuralNetwork class.
- Updated the AutoML blocks, the ensemble classes (Bagging, Composite, Stacking), the docstring, and the example notebook accordingly (a short usage sketch follows the changed-files summary below).
rizoudal committed Aug 12, 2024
1 parent 8e5c66e commit 23c78e8
Showing 7 changed files with 4 additions and 20 deletions.
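In practice, the class is now instantiated without the multiprocessing keyword. Below is a minimal, hedged sketch of the updated call: the keyword arguments mirror the new __init__ signature in aucmedi/neural_network/model.py, while the import line is an assumption, since it is not part of this diff.

```python
# Minimal sketch of constructing a NeuralNetwork after this commit.
# The keyword arguments below mirror the updated __init__ signature in
# aucmedi/neural_network/model.py; the import path is an assumption
# (it does not appear in this diff).
from aucmedi import NeuralNetwork

model = NeuralNetwork(n_labels=2,                      # number of output classes
                      channels=3,                      # RGB input images
                      input_shape=(224, 224, 3),
                      loss="categorical_crossentropy",
                      metrics=["categorical_accuracy"],
                      activation_output="softmax",
                      learning_rate=0.0001,
                      batch_queue_size=10,             # still forwarded to Keras as max_queue_size
                      verbose=1)

# The removed keyword is no longer accepted:
# NeuralNetwork(n_labels=2, channels=3, multiprocessing=False)  # -> TypeError
```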
1 change: 0 additions & 1 deletion aucmedi/automl/block_pred.py
@@ -75,7 +75,6 @@ def block_predict(config):
nn_paras = {"n_labels": 1, # placeholder
"channels": 1, # placeholder
"batch_queue_size": 4,
"multiprocessing": False,
}
# Select input shape for 3D
if meta_training["three_dim"]:
1 change: 0 additions & 1 deletion aucmedi/automl/block_train.py
@@ -140,7 +140,6 @@ def block_train(config):
"loss": loss,
"metrics": [AUC(100), F1Score(average="macro")],
"pretrained_weights": True,
"multiprocessing": False,
}
# Select input shape for 3D
if config["three_dim"] : nn_paras["input_shape"] = config["shape_3D"]
2 changes: 0 additions & 2 deletions aucmedi/ensemble/bagging.py
@@ -187,7 +187,6 @@ def train(self, training_generator, epochs=20, iterations=None,
"meta_variables": self.model_template.meta_variables,
"learning_rate": self.model_template.learning_rate,
"batch_queue_size": self.model_template.batch_queue_size,
"multiprocessing": self.model_template.multiprocessing,
}

# Gather DataGenerator parameters
@@ -324,7 +323,6 @@ def predict(self, prediction_generator, aggregate="mean",
"meta_variables": self.model_template.meta_variables,
"learning_rate": self.model_template.learning_rate,
"batch_queue_size": self.model_template.batch_queue_size,
"multiprocessing": self.model_template.multiprocessing,
}

# Start inference process for fold i
3 changes: 0 additions & 3 deletions aucmedi/ensemble/composite.py
@@ -244,7 +244,6 @@ def train(self, training_generator, epochs=20, iterations=None,
"meta_variables": self.model_list[i].meta_variables,
"learning_rate": self.model_list[i].learning_rate,
"batch_queue_size": self.model_list[i].batch_queue_size,
"multiprocessing": self.model_list[i].multiprocessing,
}

# Gather DataGenerator parameters
@@ -354,7 +353,6 @@ def train_metalearner(self, training_generator):
"meta_variables": self.model_list[i].meta_variables,
"learning_rate": self.model_list[i].learning_rate,
"batch_queue_size": self.model_list[i].batch_queue_size,
"multiprocessing": self.model_list[i].multiprocessing,
}

# Gather DataGenerator parameters
@@ -467,7 +465,6 @@ def predict(self, prediction_generator, return_ensemble=False):
"meta_variables": self.model_list[i].meta_variables,
"learning_rate": self.model_list[i].learning_rate,
"batch_queue_size": self.model_list[i].batch_queue_size,
"multiprocessing": self.model_list[i].multiprocessing,
}

# Gather DataGenerator parameters
3 changes: 0 additions & 3 deletions aucmedi/ensemble/stacking.py
@@ -232,7 +232,6 @@ def train(self, training_generator, epochs=20, iterations=None,
"meta_variables": self.model_list[i].meta_variables,
"learning_rate": self.model_list[i].learning_rate,
"batch_queue_size": self.model_list[i].batch_queue_size,
"multiprocessing": self.model_list[i].multiprocessing,
}

# Gather DataGenerator parameters
@@ -342,7 +341,6 @@ def train_metalearner(self, training_generator):
"meta_variables": self.model_list[i].meta_variables,
"learning_rate": self.model_list[i].learning_rate,
"batch_queue_size": self.model_list[i].batch_queue_size,
"multiprocessing": self.model_list[i].multiprocessing,
}

# Gather DataGenerator parameters
@@ -454,7 +452,6 @@ def predict(self, prediction_generator, return_ensemble=False):
"meta_variables": self.model_list[i].meta_variables,
"learning_rate": self.model_list[i].learning_rate,
"batch_queue_size": self.model_list[i].batch_queue_size,
"multiprocessing": self.model_list[i].multiprocessing,
}

# Gather DataGenerator parameters
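The ensemble classes above (bagging.py, composite.py, stacking.py) gather these constructor arguments into plain dictionaries and later rebuild the model from them via keyword expansion, so a key that no longer matches a parameter would break that step. Below is a hedged sketch of the idea; the worker code that actually performs the expansion is not shown in this diff, and the import path is an assumption.

```python
# Hedged sketch: why the "multiprocessing" entries had to be dropped from the
# gathered parameter dictionaries. The ensemble workers (not shown in this diff)
# rebuild the model roughly like this via keyword expansion.
from aucmedi import NeuralNetwork  # import path assumed, as above

model_paras = {
    "n_labels": 2,
    "channels": 3,
    "learning_rate": 0.0001,
    "batch_queue_size": 10,
    # "multiprocessing": False,   # stale key -> TypeError now that the parameter is gone
}
model = NeuralNetwork(**model_paras)  # succeeds only if every key is still a valid parameter
```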
8 changes: 1 addition & 7 deletions aucmedi/neural_network/model.py
@@ -138,7 +138,7 @@ def __init__(self, n_labels, channels, input_shape=None, architecture=None,
pretrained_weights=False, loss="categorical_crossentropy",
metrics=["categorical_accuracy"], activation_output="softmax",
fcl_dropout=True, meta_variables=None, learning_rate=0.0001,
- batch_queue_size=10, multiprocessing=False,
+ batch_queue_size=10,
verbose=1):
""" Initialization function for creating a Neural Network (model) object.
@@ -169,7 +169,6 @@ def __init__(self, n_labels, channels, input_shape=None, architecture=None,
([Classifier][aucmedi.neural_network.architectures.classifier]).
learning_rate (float): Learning rate in which weights of the neural network will be updated.
batch_queue_size (int): The batch queue size is the number of previously prepared batches in the cache during runtime.
- multiprocessing (bool): Option whether to utilize multi-processing for workers instead of threading.
verbose (int): Option (0/1) how much information should be written to stdout.
???+ danger
@@ -191,7 +190,6 @@ def __init__(self, n_labels, channels, input_shape=None, architecture=None,
self.metrics = metrics
self.learning_rate = learning_rate
self.batch_queue_size = batch_queue_size
- self.multiprocessing = multiprocessing
self.pretrained_weights = pretrained_weights
self.activation_output = activation_output
self.fcl_dropout = fcl_dropout
@@ -296,7 +294,6 @@ def train(self, training_generator, validation_generator=None, epochs=20,
callbacks=callbacks, epochs=epochs,
steps_per_epoch=iterations,
class_weight=class_weights,
- use_multiprocessing=self.multiprocessing,
max_queue_size=self.batch_queue_size,
verbose=self.verbose)
# Return logged history object
@@ -318,7 +315,6 @@ def train(self, training_generator, validation_generator=None, epochs=20,
epochs=self.tf_epochs,
steps_per_epoch=iterations,
class_weight=class_weights,
- use_multiprocessing=self.multiprocessing,
max_queue_size=self.batch_queue_size,
verbose=self.verbose)
# Unfreeze base model layers again
@@ -334,7 +330,6 @@ def train(self, training_generator, validation_generator=None, epochs=20,
initial_epoch=self.tf_epochs,
steps_per_epoch=iterations,
class_weight=class_weights,
- use_multiprocessing=self.multiprocessing,
max_queue_size=self.batch_queue_size,
verbose=self.verbose)
# Combine logged history objects
@@ -365,7 +360,6 @@ def predict(self, prediction_generator):
# Run inference process with the Keras predict function
preds = self.model.predict(prediction_generator,
max_queue_size=self.batch_queue_size,
- use_multiprocessing=self.multiprocessing,
verbose=self.verbose)
# Output predictions results
return preds
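As the docstring above notes, batch_queue_size keeps its role after the refactor: it is still forwarded to Keras as max_queue_size in fit() and predict(), while use_multiprocessing is simply no longer passed and falls back to the framework default. Below is a standalone toy sketch of such a Keras call (not AUCMEDI code), assuming a TensorFlow/Keras version whose fit() still accepts max_queue_size, as the surrounding AUCMEDI code does.

```python
# Standalone toy sketch (not AUCMEDI code): fit() is driven by a Sequence
# generator and max_queue_size alone; use_multiprocessing stays at its default.
# Assumes a TF/Keras version in which Model.fit still accepts max_queue_size.
import numpy as np
import tensorflow as tf

class ToyGenerator(tf.keras.utils.Sequence):
    """Yields random batches, standing in for AUCMEDI's DataGenerator."""
    def __len__(self):
        return 4                                    # four batches per epoch
    def __getitem__(self, idx):
        x = np.random.rand(8, 16).astype("float32")
        y = np.random.randint(0, 2, size=(8, 1)).astype("float32")
        return x, y

model = tf.keras.Sequential([tf.keras.layers.Input(shape=(16,)),
                             tf.keras.layers.Dense(1, activation="sigmoid")])
model.compile(optimizer="adam", loss="binary_crossentropy")
model.fit(ToyGenerator(), epochs=1, max_queue_size=10, verbose=0)
```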
6 changes: 3 additions & 3 deletions examples/applications/xray_pneumonia.ipynb
@@ -5306,7 +5306,7 @@
" \n",
"model = model.NeuralNetwork(n_labels=2, channels=3, input_shape=(224, 224, 3), architecture = architecture, pretrained_weights=True, \n",
" loss=\"categorical_crossentropy\", metrics=[\"categorical_accuracy\"], activation_output=\"softmax\", \n",
" fcl_dropout=True, learninig_rate=0.0001, batch_queue_size=10, multiprocessing=False, verbose=1)"
" fcl_dropout=True, learninig_rate=0.0001, batch_queue_size=10, verbose=1)"
]
},
{
@@ -7419,7 +7419,7 @@
"architecture = architectures.MobileNetV2(channels = 3)\n",
"model = model.NeuralNetwork(n_labels=2, channels=3, input_shape=(224, 224, 3), architecture = architecture, pretrained_weights=True, \n",
" loss=\"categorical_crossentropy\", metrics=[\"categorical_accuracy\"], activation_output=\"softmax\", \n",
" fcl_dropout=True, learninig_rate=0.0001, batch_queue_size=10, multiprocessing=False, verbose=1)\n",
" fcl_dropout=True, learninig_rate=0.0001, batch_queue_size=10, verbose=1)\n",
"```\n",
"\n",
"The imports needed for this part are:\n",
@@ -10832,7 +10832,7 @@
"#architecture = architectures.MobileNetV2(channels = 3)\n",
"#model = model.NeuralNetwork(n_labels=2, channels=3, input_shape=(224, 224, 3), architecture = architecture, pretrained_weights=True, \n",
"# loss=\"categorical_crossentropy\", metrics=[\"categorical_accuracy\"], activation_output=\"softmax\", \n",
"# fcl_dropout=True, learninig_rate=0.0001, batch_queue_size=10, multiprocessing=False, verbose=1)\n",
"# fcl_dropout=True, learninig_rate=0.0001, batch_queue_size=10, verbose=1)\n",
"\n",
"model.load(file_path='/content/drive/MyDrive/Models/fold1.h5')\n",
"pred_one_image = model.predict(prediction_generator=img)\n",
