Fixes for reference before assignment… #18640

Merged
Changes from all commits (17 commits):
432c6e6 Fixes for reference before assignment found through static analysis t… (SamuelMarks, Oct 17, 2023)
cdaa4a9 Fixes for reference before assignment found through static analysis t… (SamuelMarks, Oct 17, 2023)
3a50584 [keras/layers/preprocessing/discretization_test.py] Remove `assertAll… (SamuelMarks, Oct 25, 2023)
c923ba9 Merge branch 'master' into fix-reference-before-assignment (SamuelMarks, Oct 25, 2023)
6c57745 [keras/ops/numpy.py] Revert to see if segfault resolves on CI (SamuelMarks, Oct 26, 2023)
ba50c95 Merge branch 'master' into fix-reference-before-assignment (SamuelMarks, Dec 6, 2023)
7332a05 [keras/{callbacks/reduce_lr_on_plateau,layers/preprocessing/{center_c… (SamuelMarks, Dec 6, 2023)
c16f397 Merge branch 'master' into fix-reference-before-assignment (SamuelMarks, Dec 29, 2023)
040582f Merge branch 'master' into fix-reference-before-assignment (SamuelMarks, May 2, 2024)
2978b7a Merge master (SamuelMarks, Jun 14, 2024)
e09fc10 Merge branch 'master' into fix-reference-before-assignment (SamuelMarks, Jun 21, 2024)
fd70bd6 Merge branch 'master' into fix-reference-before-assignment (SamuelMarks, Sep 24, 2024)
f388573 Merge branch 'master' into fix-reference-before-assignment (SamuelMarks, Sep 27, 2024)
873d002 Merge branch 'master' into fix-reference-before-assignment (SamuelMarks, Oct 11, 2024)
5ee7555 [keras/src/layers/preprocessing/image_preprocessing/center_crop_test.… (SamuelMarks, Oct 11, 2024)
a3bd38d [keras/src/layers/preprocessing/image_preprocessing/random_crop_test.… (SamuelMarks, Oct 11, 2024)
f2542e9 [keras/src/layers/preprocessing/image_preprocessing/{center,random}_c… (SamuelMarks, Oct 11, 2024)
1 change: 1 addition & 0 deletions examples/demo_jax_distributed.py
@@ -287,6 +287,7 @@ def train_step(train_state, x, y):
print("\nTraining:")
data_iter = iter(train_data)
for epoch in range(EPOCHS):
loss_value = None # default
for i in tqdm(range(STEPS_PER_EPOCH)):
x, y = next(data_iter)
sharded_x = jax.device_put(x.numpy(), data_sharding)
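The pattern behind this first hunk reduces to a few lines of plain Python (a minimal sketch; the function names and the `sum(batch)` stand-in are illustrative, not from the example script): if the loop body never runs, the name read after the loop was never bound, which is exactly what the static analyzer flags. Seeding it with a default removes that possibility.

```python
def run_epoch_unsafe(batches):
    for batch in batches:
        loss_value = sum(batch)  # stand-in for a real train step
    return loss_value            # UnboundLocalError if `batches` was empty


def run_epoch_safe(batches):
    loss_value = None  # default, mirroring the line added above
    for batch in batches:
        loss_value = sum(batch)
    return loss_value  # falls back to None instead of crashing


print(run_epoch_safe([]))        # -> None
print(run_epoch_safe([[1, 2]]))  # -> 3
```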
4 changes: 2 additions & 2 deletions guides/custom_train_step_in_jax.py
@@ -124,7 +124,7 @@ def train_step(self, state, data):
)

# Update metrics.
new_metrics_vars = []
new_metrics_vars, logs = [], []
for metric in self.metrics:
this_metric_vars = metrics_variables[
len(new_metrics_vars) : len(new_metrics_vars)
@@ -314,7 +314,7 @@ def test_step(self, state, data):
loss = self.compute_loss(x, y, y_pred)

# Update metrics.
new_metrics_vars = []
new_metrics_vars, logs = [], []
for metric in self.metrics:
this_metric_vars = metrics_variables[
len(new_metrics_vars) : len(new_metrics_vars)
1 change: 1 addition & 0 deletions guides/distributed_training_with_jax.py
@@ -252,6 +252,7 @@ def get_replicated_train_state(devices):
# Custom training loop
for epoch in range(num_epochs):
data_iter = iter(train_data)
loss_value = None # default
for data in data_iter:
x, y = data
sharded_x = jax.device_put(x.numpy(), data_sharding)
2 changes: 2 additions & 0 deletions keras/src/activations/activations.py
@@ -83,6 +83,8 @@ def static_call(x, negative_slope=0.0, max_value=None, threshold=0.0):
negative_part = backend.nn.relu(-x + threshold)
else:
negative_part = backend.nn.relu(-x)
else:
negative_part = 1

clip_max = max_value is not None
if threshold != 0:
4 changes: 4 additions & 0 deletions keras/src/applications/densenet.py
@@ -289,6 +289,8 @@ def DenseNet(
cache_subdir="models",
file_hash="1ceb130c1ea1b78c3bf6114dbdfd8807",
)
else:
raise ValueError("weights_path undefined")
else:
if blocks == [6, 12, 24, 16]:
weights_path = file_utils.get_file(
@@ -311,6 +313,8 @@ def DenseNet(
cache_subdir="models",
file_hash="c13680b51ded0fb44dff2d8f86ac8bb1",
)
else:
raise ValueError("weights_path undefined")
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
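The densenet.py hunks add a terminal `else` so that an unexpected `blocks` configuration fails loudly instead of reaching `model.load_weights(weights_path)` with `weights_path` unbound. A reduced, hypothetical version of that dispatch (the file names below are placeholders, not the real release assets):

```python
def pick_weights_path(blocks):
    if blocks == [6, 12, 24, 16]:
        weights_path = "densenet121_weights.h5"  # placeholder name
    elif blocks == [6, 12, 32, 32]:
        weights_path = "densenet169_weights.h5"  # placeholder name
    else:
        # Without this branch, the name below would be read while unbound.
        raise ValueError("weights_path undefined")
    return weights_path


print(pick_weights_path([6, 12, 24, 16]))  # -> densenet121_weights.h5
```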
1 change: 1 addition & 0 deletions keras/src/backend/jax/trainer.py
@@ -397,6 +397,7 @@ def fit(

self.make_train_function()
self.stop_training = False
training_logs = {}
callbacks.on_train_begin()
initial_epoch = self._initial_epoch or initial_epoch
for epoch in range(initial_epoch, epochs):
1 change: 1 addition & 0 deletions keras/src/backend/torch/trainer.py
@@ -236,6 +236,7 @@ def fit(
)

self.stop_training = False
training_logs = {}
self.make_train_function()
callbacks.on_train_begin()
initial_epoch = self._initial_epoch or initial_epoch
2 changes: 2 additions & 0 deletions keras/src/layers/attention/attention.py
@@ -134,6 +134,8 @@ def _calculate_scores(self, query, key):
scores = self.concat_score_weight * ops.sum(
ops.tanh(q_reshaped + k_reshaped), axis=-1
)
else:
raise ValueError("scores not computed")

return scores

3 changes: 1 addition & 2 deletions keras/src/layers/preprocessing/feature_space.py
@@ -517,8 +517,7 @@ def adapt(self, dataset):
preprocessor = self.preprocessors[name]
# TODO: consider adding an adapt progress bar.
# Sample 1 element to check the rank
for x in feature_dataset.take(1):
pass
x = next(iter(feature_dataset))
if len(x.shape) == 0:
# The dataset yields unbatched scalars; batch it.
feature_dataset = feature_dataset.batch(32)
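The feature_space.py change (and the matching test-file changes below) swap a take-one loop for a direct read of the first element. A minimal, self-contained sketch of the two idioms, assuming TensorFlow is installed; the toy dataset is made up:

```python
import tensorflow as tf

ds = tf.data.Dataset.from_tensor_slices([1.0, 2.0, 3.0])

# Old idiom: loop once just to capture the element; a static analyzer cannot
# prove `x` is assigned if the dataset happens to be empty.
for x in ds.take(1):
    pass

# New idiom: fetch the first element directly. An empty dataset now raises
# StopIteration instead of silently leaving `x` unbound.
x = next(iter(ds))
print(x.numpy())  # -> 1.0
```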
3 changes: 1 addition & 2 deletions keras/src/layers/preprocessing/hashed_crossing_test.py
@@ -86,8 +86,7 @@ def test_tf_data_compatibility(self):
.batch(5)
.map(lambda x1, x2: layer((x1, x2)))
)
for output in ds.take(1):
output = output.numpy()
output = next(iter(ds)).numpy()
self.assertAllClose(np.array([1, 4, 1, 1, 3]), output)

def test_unsupported_shape_input_fails(self):
5 changes: 3 additions & 2 deletions keras/src/layers/preprocessing/hashing_test.py
@@ -60,8 +60,7 @@ def test_tf_data_compatibility(self):
layer = layers.Hashing(num_bins=3)
inp = [["A"], ["B"], ["C"], ["D"], ["E"]]
ds = tf.data.Dataset.from_tensor_slices(inp).batch(5).map(layer)
for output in ds.take(1):
output = output.numpy()
output = next(iter(ds)).numpy()
self.assertAllClose(output, np.array([[1], [0], [1], [1], [2]]))

@parameterized.named_parameters(
@@ -306,6 +305,8 @@ def test_count_output(self, input_value, expected_output, output_shape):
symbolic_sample_shape = ()
elif input_array.ndim == 2:
symbolic_sample_shape = (None,)
else:
raise TypeError("Unknown `symbolic_sample_shape`")
inputs = layers.Input(shape=symbolic_sample_shape, dtype="int32")
layer = layers.Hashing(num_bins=3, output_mode="count")
outputs = layer(inputs)
@@ -171,8 +171,7 @@ def test_tf_data_compatibility(self):
layer = layers.CenterCrop(8, 9)
input_data = np.random.random(input_shape)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
output = next(iter(ds)).numpy()
self.assertEqual(tuple(output.shape), output_shape)

# TODO
@@ -54,8 +54,7 @@ def test_tf_data_compatibility(self):
layer = layers.RandomContrast(factor=0.5, seed=1337)
input_data = np.random.random((2, 8, 8, 3))
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output.numpy()
next(iter(ds)).numpy()

def test_dict_input(self):
layer = layers.RandomContrast(factor=0.1, bounding_box_format="xyxy")
@@ -136,8 +136,7 @@ def test_tf_data_compatibility(self):
output_shape = (2, 3, 8, 9)
input_data = np.random.random(input_shape)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
output = next(iter(ds)).numpy()
self.assertEqual(tuple(output.shape), output_shape)

def test_dict_input(self):
@@ -141,8 +141,7 @@ def test_tf_data_compatibility(self):
input_data = np.array([[[2, 3, 4]], [[5, 6, 7]]])
expected_output = np.array([[[5, 6, 7]], [[2, 3, 4]]])
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
output = next(iter(ds)).numpy()
self.assertAllClose(output, expected_output)
# Test 4D input: shape (2, 2, 1, 3)
layer = layers.RandomFlip(
@@ -167,6 +166,5 @@ def test_tf_data_compatibility(self):
]
)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
output = next(iter(ds)).numpy()
self.assertAllClose(output, expected_output)
@@ -73,6 +73,5 @@ def test_tf_data_compatibility(self):
[4, 3, 2, 1, 0],
]
).reshape(input_shape[1:])
for output in ds.take(1):
output = output.numpy()
output = next(iter(ds)).numpy()
self.assertAllClose(expected_output, output)
@@ -327,5 +327,4 @@ def test_tf_data_compatibility(self):
layer = layers.RandomTranslation(0.2, 0.1)
input_data = np.random.random((1, 4, 4, 3))
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(1).map(layer)
for output in ds.take(1):
output.numpy()
next(iter(ds)).numpy()
@@ -119,8 +119,7 @@ def test_tf_data_compatibility(self):
[0, 0, 0, 0, 0],
]
).reshape(input_shape)
for output in ds.take(1):
output = output.numpy()
output = next(iter(ds)).numpy()
self.assertAllClose(expected_output, output)

def test_dynamic_shape(self):
@@ -186,8 +186,7 @@ def test_tf_data_compatibility(self):
layer = layers.Resizing(8, 9)
input_data = np.random.random(input_shape)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
output = next(iter(ds)).numpy()
self.assertEqual(tuple(output.shape), output_shape)

@pytest.mark.skipif(
@@ -210,8 +209,7 @@ def test_tf_data_compatibility_sequential(self):
.batch(2)
.map(Sequential([layer]))
)
for output in ds.take(1):
output = output.numpy()
output = next(iter(ds)).numpy()
self.assertEqual(tuple(output.shape), output_shape)

@parameterized.parameters(
3 changes: 1 addition & 2 deletions keras/src/layers/preprocessing/integer_lookup_test.py
@@ -102,6 +102,5 @@ def test_tf_data_compatibility(self):
)
input_data = [2, 3, 4, 5]
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(4).map(layer)
for output in ds.take(1):
output = output.numpy()
output = next(iter(ds)).numpy()
self.assertAllClose(output, np.array([2, 3, 4, 0]))
2 changes: 2 additions & 0 deletions keras/src/layers/preprocessing/normalization.py
@@ -277,6 +277,8 @@ def adapt(self, data):
batch_var + (batch_mean - new_total_mean) ** 2
) * batch_weight
total_mean = new_total_mean
else:
raise NotImplementedError(type(data))

self.adapt_mean.assign(total_mean)
self.adapt_variance.assign(total_var)
2 changes: 2 additions & 0 deletions keras/src/layers/preprocessing/normalization_test.py
@@ -65,6 +65,8 @@ def test_normalization_adapt(self, input_type):
data = backend.convert_to_tensor(x)
elif input_type == "tf.data":
data = tf_data.Dataset.from_tensor_slices(x).batch(8)
else:
raise NotImplementedError(input_type)

layer = layers.Normalization()
layer.adapt(data)
3 changes: 1 addition & 2 deletions keras/src/layers/preprocessing/rescaling_test.py
@@ -72,8 +72,7 @@ def test_tf_data_compatibility(self):
layer = layers.Rescaling(scale=1.0 / 255, offset=0.5)
x = np.random.random((3, 10, 10, 3)) * 255
ds = tf_data.Dataset.from_tensor_slices(x).batch(3).map(layer)
for output in ds.take(1):
output.numpy()
next(iter(ds)).numpy()

def test_rescaling_with_channels_first_and_vector_scale(self):
config = backend.image_data_format()
3 changes: 1 addition & 2 deletions keras/src/layers/preprocessing/string_lookup_test.py
@@ -77,8 +77,7 @@ def test_tf_data_compatibility(self):
)
input_data = ["b", "c", "d"]
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(3).map(layer)
for output in ds.take(1):
output = output.numpy()
output = next(iter(ds)).numpy()
self.assertAllClose(output, np.array([2, 3, 0]))

@pytest.mark.skipif(not backend.backend() == "tensorflow", reason="tf only")
6 changes: 2 additions & 4 deletions keras/src/layers/preprocessing/text_vectorization_test.py
@@ -95,8 +95,7 @@ def test_tf_data_compatibility(self):
)
input_data = [["foo qux bar"], ["qux baz"]]
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
output = next(iter(ds)).numpy()
self.assertAllClose(output, np.array([[4, 1, 3, 0], [1, 2, 0, 0]]))

# Test adapt flow
@@ -107,8 +106,7 @@ def test_tf_data_compatibility(self):
)
layer.adapt(input_data)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output.numpy()
next(iter(ds)).numpy()

@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Requires string tensors."
2 changes: 2 additions & 0 deletions keras/src/legacy/backend.py
@@ -1279,6 +1279,8 @@ def relu(x, alpha=0.0, max_value=None, threshold=0.0):
negative_part = tf.nn.relu(-x + threshold)
else:
negative_part = tf.nn.relu(-x)
else:
negative_part = 1

clip_max = max_value is not None

5 changes: 3 additions & 2 deletions keras/src/models/model.py
@@ -397,6 +397,7 @@ def quantize(self, mode, **kwargs):
def build_from_config(self, config):
if not config:
return
status = False
if "input_shape" in config:
# Case: all inputs are in the first arg (possibly nested).
if utils.is_default(self.build):
@@ -408,7 +409,7 @@ def build_from_config(self, config):
self.build(config["input_shape"])
status = True
except:
status = False
pass
self._build_shapes_dict = config

elif "shapes_dict" in config:
@@ -420,7 +421,7 @@ def build_from_config(self, config):
self.build(**config["shapes_dict"])
status = True
except:
status = False
pass
self._build_shapes_dict = config["shapes_dict"]

if not status:
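The model.py change hoists `status = False` above both branches, so the `except` blocks no longer need to assign it and the final `if not status:` check is safe even when neither config key is present. A stripped-down sketch of that control flow (the print below is a stand-in for the real warning, and the bare `except:` of the original is narrowed to `Exception` here):

```python
def build_from_config_sketch(config, build):
    if not config:
        return
    status = False  # defined up front, as in the diff
    if "input_shape" in config:
        try:
            build(config["input_shape"])
            status = True
        except Exception:
            pass
    elif "shapes_dict" in config:
        try:
            build(**config["shapes_dict"])
            status = True
        except Exception:
            pass
    if not status:
        print("warning: model could not be rebuilt from config")


build_from_config_sketch({"input_shape": (None, 4)}, build=lambda shape: None)
```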
1 change: 1 addition & 0 deletions keras/src/models/sequential.py
@@ -359,6 +359,7 @@ def from_config(cls, config, custom_objects=None):
model.add(layer)
if (
not model._functional
and "build_input_shape" in locals()
and build_input_shape
and isinstance(build_input_shape, (tuple, list))
):
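The sequential.py change guards the `build_input_shape` check with a `locals()` membership test, since that name is only bound earlier in `from_config` when the serialized config carried it. The same idea in isolation (a toy sketch, not the real deserialization path):

```python
def from_config_sketch(config):
    if "build_input_shape" in config:
        build_input_shape = config["build_input_shape"]
    if (
        "build_input_shape" in locals()  # short-circuits if the branch above was skipped
        and build_input_shape
        and isinstance(build_input_shape, (tuple, list))
    ):
        return tuple(build_input_shape)
    return None


print(from_config_sketch({"build_input_shape": [None, 8]}))  # -> (None, 8)
print(from_config_sketch({}))                                # -> None
```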
2 changes: 1 addition & 1 deletion keras/src/ops/function.py
@@ -318,7 +318,7 @@ def map_graph(inputs, outputs):
"The following previous operations were accessed "
f"without issue: {operations_with_complete_input}"
)
operations_with_complete_input.append(operation.name)
operations_with_complete_input.append(node.operation.name)

for x in tree.flatten(node.outputs):
computable_tensors.add(x)
2 changes: 2 additions & 0 deletions keras/src/optimizers/base_optimizer.py
@@ -914,6 +914,8 @@ def get_config(self):
learning_rate = serialization_lib.serialize_keras_object(
self._learning_rate
)
else:
learning_rate = 0.5

config = {
"name": self.name,
2 changes: 1 addition & 1 deletion keras/src/trainers/compile_utils.py
@@ -170,6 +170,7 @@ def variables(self):
return vars

def build(self, y_true, y_pred):
num_outputs = 1 # default
if self.output_names:
output_names = self.output_names
elif isinstance(y_pred, dict):
@@ -182,7 +183,6 @@ def build(self, y_true, y_pred):
output_names = None
else:
output_names = None
num_outputs = 1
if output_names:
num_outputs = len(output_names)

1 change: 1 addition & 0 deletions keras/src/tree/dmtree_impl.py
@@ -60,6 +60,7 @@ def truncate(value, length):
)
return flat_sequence[0]

packed = []
try:
final_index, packed = packed_nest_with_indices(
structure, flat_sequence, 0, is_nested_fn, sequence_fn
4 changes: 3 additions & 1 deletion keras/src/utils/file_utils.py
@@ -100,9 +100,11 @@ def extract_archive(file_path, path=".", archive_format="auto"):
if archive_type == "tar":
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == "zip":
elif archive_type == "zip":
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
else:
raise NotImplementedError(archive_type)

if is_match_fn(file_path):
with open_fn(file_path) as archive:
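The file_utils.py fix turns the second `if` into an `elif` and adds an `else`, so an unrecognized archive type raises instead of reaching `is_match_fn(file_path)` while `is_match_fn` is unbound. A self-contained sketch of the corrected dispatch (the helper name and usage are illustrative):

```python
import tarfile
import zipfile


def pick_archive_handlers(archive_type):
    if archive_type == "tar":
        open_fn, is_match_fn = tarfile.open, tarfile.is_tarfile
    elif archive_type == "zip":
        open_fn, is_match_fn = zipfile.ZipFile, zipfile.is_zipfile
    else:
        # Previously this case fell through with `open_fn` never assigned.
        raise NotImplementedError(archive_type)
    return open_fn, is_match_fn


open_fn, is_match_fn = pick_archive_handlers("zip")
print(open_fn, is_match_fn)

try:
    pick_archive_handlers("rar")
except NotImplementedError as exc:
    print("unsupported archive type:", exc)
```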