Address various warnings in ResourceUtils class (#10456)

Signed-off-by: Jakub Scholz <www@scholzj.com>
scholzj committed Aug 16, 2024
1 parent 4b54ddc commit 9181fa3
Showing 10 changed files with 23 additions and 62 deletions.
@@ -4,15 +4,13 @@
*/
package io.strimzi.operator.cluster;

import io.fabric8.kubernetes.api.model.HasMetadata;
import io.fabric8.kubernetes.api.model.LoadBalancerIngressBuilder;
import io.fabric8.kubernetes.api.model.ObjectMeta;
import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
import io.fabric8.kubernetes.api.model.Secret;
import io.fabric8.kubernetes.api.model.SecretBuilder;
import io.fabric8.kubernetes.api.model.ServiceBuilder;
import io.fabric8.kubernetes.api.model.ServicePortBuilder;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.openshift.api.model.RouteBuilder;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import io.strimzi.api.kafka.model.bridge.KafkaBridge;
@@ -86,7 +84,6 @@
import io.strimzi.operator.common.Reconciliation;
import io.strimzi.operator.common.auth.PemAuthIdentity;
import io.strimzi.operator.common.auth.PemTrustSet;
import io.strimzi.operator.common.auth.TlsPemIdentity;
import io.strimzi.operator.common.model.Ca;
import io.strimzi.operator.common.model.Labels;
import io.strimzi.test.TestUtils;
@@ -112,14 +109,14 @@
import org.apache.kafka.common.internals.KafkaFutureImpl;
import org.apache.kafka.common.quota.ClientQuotaEntity;
import org.apache.kafka.server.common.MetadataVersion;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.admin.ZooKeeperAdmin;

import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Collections;
import java.util.HashMap;
@@ -128,6 +125,7 @@
import java.util.Optional;
import java.util.OptionalLong;
import java.util.Properties;
import java.util.stream.Stream;

import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonList;
@@ -247,24 +245,6 @@ public static Secret createInitialCaKeySecret(String clusterNamespace, String cl
.build();
}

public static Kafka createKafka(String namespace, String name, int replicas,
String image, int healthDelay, int healthTimeout,
MetricsConfig metricsConfig,
Map<String, Object> kafkaConfigurationJson,
Logging kafkaLogging, Logging zkLogging) {
return new KafkaBuilder(createKafka(namespace, name, replicas, image, healthDelay,
healthTimeout, metricsConfig, kafkaConfigurationJson, emptyMap()))
.editSpec()
.editKafka()
.withLogging(kafkaLogging)
.endKafka()
.editZookeeper()
.withLogging(zkLogging)
.endZookeeper()
.endSpec()
.build();
}

@SuppressWarnings({"checkstyle:ParameterNumber"})
public static Kafka createKafka(String namespace, String name, int replicas,
String image, int healthDelay, int healthTimeout,
@@ -369,6 +349,7 @@ public static KafkaBridge createEmptyKafkaBridge(String namespace, String name)
/**
* Create an empty Kafka MirrorMaker custom resource
*/
@SuppressWarnings("deprecation")
public static KafkaMirrorMaker createEmptyKafkaMirrorMaker(String namespace, String name) {
return new KafkaMirrorMakerBuilder()
.withMetadata(new ObjectMetaBuilder()
@@ -416,7 +397,7 @@ public static KafkaMirrorMaker createKafkaMirrorMaker(String namespace, String n
return builder.build();
}

public static KafkaBridge createKafkaBridge(String namespace, String name, String image, int replicas, String bootstrapservers, KafkaBridgeProducerSpec producer, KafkaBridgeConsumerSpec consumer, KafkaBridgeHttpConfig http, boolean enableMetrics) {
public static KafkaBridge createKafkaBridge(String namespace, String name, String image, int replicas, String bootstrapServers, KafkaBridgeProducerSpec producer, KafkaBridgeConsumerSpec consumer, KafkaBridgeHttpConfig http, boolean enableMetrics) {
return new KafkaBridgeBuilder()
.withMetadata(new ObjectMetaBuilder()
.withName(name)
@@ -427,7 +408,7 @@ public static KafkaBridge createKafkaBridge(String namespace, String name, Strin
.withNewSpec()
.withImage(image)
.withReplicas(replicas)
.withBootstrapServers(bootstrapservers)
.withBootstrapServers(bootstrapServers)
.withProducer(producer)
.withConsumer(consumer)
.withEnableMetrics(enableMetrics)
@@ -468,18 +449,20 @@ public static KafkaMirrorMaker2 createEmptyKafkaMirrorMaker2(String namespace, S

public static void cleanUpTemporaryTLSFiles() {
String tmpString = "/tmp";
try {
Files.list(Paths.get(tmpString)).filter(path -> path.toString().startsWith(tmpString + "/tls")).forEach(delPath -> {
try (Stream<Path> files = Files.list(Paths.get(tmpString))) {
files.filter(path -> path.toString().startsWith(tmpString + "/tls")).forEach(delPath -> {
try {
Files.deleteIfExists(delPath);
} catch (IOException e) {
// Nothing to do
}
});
} catch (IOException e) {
// Nothing to do
}
}
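
The change above wraps Files.list() in try-with-resources because the returned Stream keeps a directory handle open until it is closed, which is presumably the stream-not-closed warning being addressed here. A minimal standalone sketch of the same pattern (the class and path names are illustrative, not part of the commit):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.stream.Stream;

    public class TlsTempFileCleanup {
        // Files.list() returns a lazily populated Stream backed by an open
        // DirectoryStream; try-with-resources guarantees it is closed even
        // if the traversal throws.
        public static void deleteTlsTempFiles() {
            try (Stream<Path> files = Files.list(Path.of("/tmp"))) {
                files.filter(path -> path.toString().startsWith("/tmp/tls"))
                     .forEach(path -> {
                         try {
                             Files.deleteIfExists(path);
                         } catch (IOException e) {
                             // best-effort cleanup, ignore individual failures
                         }
                     });
            } catch (IOException e) {
                // the directory could not be listed, nothing to clean up
            }
        }
    }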

public static ZookeeperLeaderFinder zookeeperLeaderFinder(Vertx vertx, KubernetesClient client) {
public static ZookeeperLeaderFinder zookeeperLeaderFinder(Vertx vertx) {
return new ZookeeperLeaderFinder(vertx, () -> new BackOff(5_000, 2, 4)) {
@Override
protected Future<Boolean> isLeader(Reconciliation reconciliation, String podName, NetClientOptions options) {
@@ -533,7 +516,7 @@ public static Admin adminClient() {
}
when(mock.describeConfigs(any())).thenReturn(dcfr);

// Mocks the describeFeatures() call used in KRaft to manege metadata version
// Mocks the describeFeatures() call used in KRaft to manage metadata version
DescribeFeaturesResult dfr;
try {
Constructor<DescribeFeaturesResult> declaredConstructor = DescribeFeaturesResult.class.getDeclaredConstructor(KafkaFuture.class);
@@ -545,7 +528,7 @@

short metadataLevel = MetadataVersion.fromVersionString(KafkaVersionTestUtils.getKafkaVersionLookup().defaultVersion().metadataVersion()).featureLevel();
FinalizedVersionRange finalizedVersionRange = declaredConstructor3.newInstance(metadataLevel, metadataLevel);
FeatureMetadata featureMetadata = declaredConstructor2.newInstance(Map.of(MetadataVersion.FEATURE_NAME, finalizedVersionRange), Optional.ofNullable(null), Map.of());
FeatureMetadata featureMetadata = declaredConstructor2.newInstance(Map.of(MetadataVersion.FEATURE_NAME, finalizedVersionRange), Optional.empty(), Map.of());
KafkaFuture<FeatureMetadata> kafkaFuture = KafkaFutureImpl.completedFuture(featureMetadata);
dfr = declaredConstructor.newInstance(kafkaFuture);
} catch (ReflectiveOperationException e) {
@@ -640,12 +623,7 @@ public static KafkaAgentClientProvider kafkaAgentClientProvider() {
}
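
One of the warnings addressed in adminClient() above concerns Optional.ofNullable(null), which is replaced with Optional.empty(): both produce an empty Optional, but the latter states that intent directly instead of routing a literal null through ofNullable. A trivial illustration, using the same imports as the file above (variable names are made up for this note):

    Optional<FeatureMetadata> before = Optional.ofNullable(null); // always empty, typically flagged by static analysis
    Optional<FeatureMetadata> after = Optional.empty();           // same result, explicit intent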

public static KafkaAgentClientProvider kafkaAgentClientProvider(KafkaAgentClient mockKafkaAgentClient) {
return new KafkaAgentClientProvider() {
@Override
public KafkaAgentClient createKafkaAgentClient(Reconciliation reconciliation, TlsPemIdentity tlsPemIdentity) {
return mockKafkaAgentClient;
}
};
return (reconciliation, tlsPemIdentity) -> mockKafkaAgentClient;
}
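
This provider, and the ZooKeeperAdminProvider a little further down, are reduced from anonymous inner classes to lambdas, which is only possible because each interface declares exactly one abstract method. A minimal sketch of the same simplification using a hypothetical interface (not part of the Strimzi API):

    @FunctionalInterface
    interface ClientProvider {
        AutoCloseable createClient(String connectString);
    }

    class ProviderExample {
        static final AutoCloseable MOCK_CLIENT = () -> { };

        // Anonymous-class form, typically flagged as convertible to a lambda:
        static final ClientProvider VERBOSE = new ClientProvider() {
            @Override
            public AutoCloseable createClient(String connectString) {
                return MOCK_CLIENT;
            }
        };

        // Equivalent lambda form, matching the style used in this commit:
        static final ClientProvider CONCISE = connectString -> MOCK_CLIENT;
    }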

public static ZooKeeperAdmin zooKeeperAdmin() {
@@ -659,12 +637,7 @@ public static ZooKeeperAdminProvider zooKeeperAdminProvider() {
}

public static ZooKeeperAdminProvider zooKeeperAdminProvider(ZooKeeperAdmin mockZooKeeperAdmin) {
return new ZooKeeperAdminProvider() {
@Override
public ZooKeeperAdmin createZookeeperAdmin(String connectString, int sessionTimeout, Watcher watcher, long operationTimeoutMs, String trustStoreFile, String keyStoreFile) throws IOException {
return mockZooKeeperAdmin;
}
};
return (connectString, sessionTimeout, watcher, operationTimeoutMs, trustStoreFile, keyStoreFile) -> mockZooKeeperAdmin;
}

public static MetricsProvider metricsProvider() {
@@ -777,16 +750,4 @@ public static ClusterOperatorConfig dummyClusterOperatorConfig(String featureGat
.with(ClusterOperatorConfig.FEATURE_GATES.key(), featureGates)
.build();
}

/**
* Find the first resource in the given resources with the given name.
* @param resources The resources to search.
* @param name The name of the resource.
* @return The first resource with that name. Names should be unique per namespace.
*/
public static <T extends HasMetadata> T findResourceWithName(List<T> resources, String name) {
return resources.stream()
.filter(s -> s.getMetadata().getName().equals(name)).findFirst()
.orElse(null);
}
}

@@ -148,7 +148,7 @@ public void beforeEach(TestInfo testInfo) {
// creating the Kafka operator
ResourceOperatorSupplier ros =
new ResourceOperatorSupplier(JbodStorageMockZooBasedTest.vertx, client,
ResourceUtils.zookeeperLeaderFinder(JbodStorageMockZooBasedTest.vertx, client),
ResourceUtils.zookeeperLeaderFinder(JbodStorageMockZooBasedTest.vertx),
ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(),
ResourceUtils.metricsProvider(), ResourceUtils.zooKeeperAdminProvider(), pfa, 60_000L);


@@ -173,7 +173,7 @@ public void afterEach() {
}

private Future<Void> initialize() {
supplier = new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx, client), ResourceUtils.adminClientProvider(),
supplier = new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx), ResourceUtils.adminClientProvider(),
ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), ResourceUtils.zooKeeperAdminProvider(), PFA, 2_000);

podSetController = new StrimziPodSetController(namespace, Labels.EMPTY, supplier.kafkaOperator, supplier.connectOperator, supplier.mirrorMaker2Operator, supplier.strimziPodSetOperator, supplier.podOperations, supplier.metricsProvider, Integer.parseInt(ClusterOperatorConfig.POD_SET_CONTROLLER_WORK_QUEUE_SIZE.defaultValue()));

@@ -189,7 +189,7 @@ public void afterEach() {
}

private ResourceOperatorSupplier supplierWithMocks() {
return new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx, client),
return new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx),
ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(),
ResourceUtils.metricsProvider(), ResourceUtils.zooKeeperAdminProvider(), new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION), 2_000);
}

@@ -205,7 +205,7 @@ public void afterEach() {
}

private ResourceOperatorSupplier supplierWithMocks() {
return new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx, client),
return new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx),
ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(),
ResourceUtils.metricsProvider(), ResourceUtils.zooKeeperAdminProvider(), new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION), 2_000);
}

@@ -103,7 +103,7 @@ public void beforeEach(TestInfo testInfo) {
namespace = testInfo.getTestMethod().orElseThrow().getName().toLowerCase(Locale.ROOT);
mockKube.prepareNamespace(namespace);

supplier = new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx, client), ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), ResourceUtils.zooKeeperAdminProvider(), PFA, 2_000);
supplier = new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx), ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), ResourceUtils.zooKeeperAdminProvider(), PFA, 2_000);
podSetController = new StrimziPodSetController(namespace, Labels.EMPTY, supplier.kafkaOperator, supplier.connectOperator, supplier.mirrorMaker2Operator, supplier.strimziPodSetOperator, supplier.podOperations, supplier.metricsProvider, Integer.parseInt(ClusterOperatorConfig.POD_SET_CONTROLLER_WORK_QUEUE_SIZE.defaultValue()));
podSetController.start();
}

@@ -143,7 +143,7 @@ public void afterEach() {
}

private Future<Void> initialize() {
supplier = new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx, client), ResourceUtils.adminClientProvider(),
supplier = new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx), ResourceUtils.adminClientProvider(),
ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), ResourceUtils.zooKeeperAdminProvider(), PFA, 2_000);

podSetController = new StrimziPodSetController(namespace, Labels.EMPTY, supplier.kafkaOperator, supplier.connectOperator, supplier.mirrorMaker2Operator, supplier.strimziPodSetOperator, supplier.podOperations, supplier.metricsProvider, Integer.parseInt(ClusterOperatorConfig.POD_SET_CONTROLLER_WORK_QUEUE_SIZE.defaultValue()));

@@ -180,7 +180,7 @@ private Future<Void> initialize(String initialMetadataVersion) {
Admin mockAdmin = ResourceUtils.adminClient();
metadataLevel = new AtomicInteger(metadataVersionToLevel(initialMetadataVersion));
mockAdminClient(mockAdmin);
supplier = new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx, client), ResourceUtils.adminClientProvider(mockAdmin),
supplier = new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx), ResourceUtils.adminClientProvider(mockAdmin),
ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), ResourceUtils.zooKeeperAdminProvider(), PFA, 2_000);

podSetController = new StrimziPodSetController(namespace, Labels.EMPTY, supplier.kafkaOperator, supplier.connectOperator, supplier.mirrorMaker2Operator, supplier.strimziPodSetOperator, supplier.podOperations, supplier.metricsProvider, Integer.parseInt(ClusterOperatorConfig.POD_SET_CONTROLLER_WORK_QUEUE_SIZE.defaultValue()));

@@ -203,7 +203,7 @@ public void afterEach() {
ResourceOperatorSupplier supplier(KubernetesClient bootstrapClient, PlatformFeaturesAvailability pfa) {
return new ResourceOperatorSupplier(vertx,
bootstrapClient,
ResourceUtils.zookeeperLeaderFinder(vertx, bootstrapClient),
ResourceUtils.zookeeperLeaderFinder(vertx),
ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(),
ResourceUtils.metricsProvider(),
ResourceUtils.zooKeeperAdminProvider(),

@@ -174,7 +174,7 @@ public void afterEach() {
ResourceOperatorSupplier supplier(KubernetesClient bootstrapClient, PlatformFeaturesAvailability pfa) {
return new ResourceOperatorSupplier(vertx,
bootstrapClient,
ResourceUtils.zookeeperLeaderFinder(vertx, bootstrapClient),
ResourceUtils.zookeeperLeaderFinder(vertx),
ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(),
ResourceUtils.metricsProvider(),
ResourceUtils.zooKeeperAdminProvider(),
