
Commit f0d2a52

Merge branch 'trunk' into KAFKA-19999
2 parents: c667152 + de029ac

172 files changed (+31181, -26002 lines)


clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java

Lines changed: 1 addition & 1 deletion
@@ -577,7 +577,7 @@ private Optional<Node> maybeNodeForPosition(TopicPartition partition,
      * </p>
      *
      * <p>
-     * Here's why this is important—in a production system, a given leader node serves as a leader for many partitions.
+     * Here's why this is important-in a production system, a given leader node serves as a leader for many partitions.
      * From the client's perspective, it's possible that a node has a mix of both fetchable and unfetchable partitions.
      * When the client determines which nodes to skip and which to fetch from, it's important that unfetchable
      * partitions don't block fetchable partitions from being fetched.
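To illustrate the behavior this comment describes, here is a small, self-contained sketch (not the actual AbstractFetch logic; the types and names are hypothetical): a node is skipped only when every partition it leads is unfetchable.

    import java.util.*;

    // Hypothetical illustration: a node is still worth fetching from as long
    // as it leads at least one fetchable partition, even if it also leads
    // unfetchable ones.
    final class NodeSelectionSketch {
        static Set<String> nodesToFetchFrom(Map<String, List<Boolean>> fetchableByNode) {
            Set<String> result = new HashSet<>();
            for (Map.Entry<String, List<Boolean>> e : fetchableByNode.entrySet()) {
                // Skip the node only if *every* partition it leads is unfetchable.
                if (e.getValue().stream().anyMatch(f -> f))
                    result.add(e.getKey());
            }
            return result;
        }

        public static void main(String[] args) {
            // node1 leads a mix of fetchable and unfetchable partitions; it is still polled.
            Map<String, List<Boolean>> state = Map.of(
                "node1", List.of(true, false),
                "node2", List.of(false, false));
            System.out.println(nodesToFetchFrom(state)); // [node1]
        }
    }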

clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java

Lines changed: 1 addition & 1 deletion
@@ -2189,7 +2189,7 @@ private void subscribeInternal(Collection<String> topics, Optional<ConsumerRebal
     }
 
     /**
-     * Process the events—if any—that were produced by the {@link ConsumerNetworkThread network thread}.
+     * Process the events-if any-that were produced by the {@link ConsumerNetworkThread network thread}.
      * It is possible that {@link ErrorEvent an error}
      * could occur when processing the events. In such cases, the processor will take a reference to the first
      * error, continue to process the remaining events, and then throw the first error that occurred.
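The error-handling contract described in this javadoc (process every event, remember only the first failure, throw it at the end) can be sketched generically. The `Event` type and `processAll` helper below are hypothetical stand-ins, not the consumer's internals:

    import java.util.List;

    // Hypothetical sketch of the "first error wins, but everything runs" contract.
    final class FirstErrorProcessor {
        interface Event { void process(); }

        static void processAll(List<Event> events) {
            RuntimeException firstError = null;
            for (Event event : events) {
                try {
                    event.process();
                } catch (RuntimeException e) {
                    // Keep a reference to the first error, but keep processing.
                    if (firstError == null)
                        firstError = e;
                }
            }
            if (firstError != null)
                throw firstError; // Surface only the first error that occurred.
        }
    }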

clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java

Lines changed: 1 addition & 1 deletion
@@ -242,7 +242,7 @@ void runOnce() {
     }
 
     /**
-     * Process the events—if any—that were produced by the application thread.
+     * Process the events-if any-that were produced by the application thread.
      */
     private void processApplicationEvents() {
         LinkedList<ApplicationEvent> events = new LinkedList<>();

clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java

Lines changed: 4 additions & 4 deletions
@@ -248,17 +248,17 @@ public class KafkaProducer<K, V> implements Producer<K, V> {
     public static final String NETWORK_THREAD_PREFIX = "kafka-producer-network-thread";
     public static final String PRODUCER_METRIC_GROUP_NAME = "producer-metrics";
 
-    private static final String INIT_TXN_TIMEOUT_MSG = "InitTransactions timed out — " +
+    private static final String INIT_TXN_TIMEOUT_MSG = "InitTransactions timed out - " +
         "did not complete coordinator discovery or " +
         "receive the InitProducerId response within max.block.ms.";
 
     private static final String SEND_OFFSETS_TIMEOUT_MSG =
-        "SendOffsetsToTransaction timed out — did not reach the coordinator or " +
+        "SendOffsetsToTransaction timed out - did not reach the coordinator or " +
         "receive the TxnOffsetCommit/AddOffsetsToTxn response within max.block.ms";
     private static final String COMMIT_TXN_TIMEOUT_MSG =
-        "CommitTransaction timed out — did not complete EndTxn with the transaction coordinator within max.block.ms";
+        "CommitTransaction timed out - did not complete EndTxn with the transaction coordinator within max.block.ms";
     private static final String ABORT_TXN_TIMEOUT_MSG =
-        "AbortTransaction timed out — did not complete EndTxn(abort) with the transaction coordinator within max.block.ms";
+        "AbortTransaction timed out - did not complete EndTxn(abort) with the transaction coordinator within max.block.ms";
 
     private final String clientId;
     // Visible for testing
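These messages surface when a transactional call exceeds max.block.ms. A minimal sketch of where a caller would see the reworded InitTransactions message (broker address and transactional id are placeholders, and no broker needs to exist for the timeout to fire):

    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.common.errors.TimeoutException;
    import org.apache.kafka.common.serialization.StringSerializer;

    public class InitTxnTimeoutExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
            props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "example-txn-id");  // placeholder id
            props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "5000"); // bound the wait to 5 s
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                producer.initTransactions(); // blocks up to max.block.ms
            } catch (TimeoutException e) {
                // Message now reads "InitTransactions timed out - did not complete ..."
                System.err.println(e.getMessage());
            }
        }
    }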

clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java

Lines changed: 1 addition & 1 deletion
@@ -176,7 +176,7 @@
 public class KafkaProducerTest {
 
     private static final String INIT_TXN_TIMEOUT_MSG =
-        "InitTransactions timed out — " +
+        "InitTransactions timed out - " +
         "did not complete coordinator discovery or " +
         "receive the InitProducerId response within max.block.ms.";

core/src/test/scala/unit/kafka/server/StreamsGroupHeartbeatRequestTest.scala

Lines changed: 2 additions & 7 deletions
@@ -35,7 +35,6 @@ import scala.jdk.CollectionConverters._
   serverProperties = Array(
     new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"),
     new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"),
-    new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true"),
     new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,streams"),
     new ClusterConfigProperty(key = "group.streams.initial.rebalance.delay.ms", value = "0")
   )
@@ -111,12 +110,8 @@ class StreamsGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupCo
     assertEquals(expectedResponse, streamsGroupHeartbeatResponse)
   }
 
-  @ClusterTest(
-    serverProperties = Array(
-      new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, value = "classic,consumer,streams"),
-    )
-  )
-  def testStreamsGroupHeartbeatIsInaccessibleWhenUnstableLatestVersionNotEnabled(): Unit = {
+  @ClusterTest
+  def testStreamsGroupHeartbeatIsInaccessibleWhenOffsetTopicNotExist(): Unit = {
     val topology = new StreamsGroupHeartbeatRequestData.Topology()
       .setEpoch(1)
       .setSubtopologies(List().asJava)

docs/README.md

Lines changed: 0 additions & 4 deletions
This file was deleted.

docs/_index.md

Lines changed: 28 additions & 0 deletions
@@ -0,0 +1,28 @@
---
title: AK 4.3.X
description: Documentation for AK 4.3.X
weight:
tags: ['kafka', 'docs']
aliases:
keywords:
type: docs
---

<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
docs/api.html

Lines changed: 0 additions & 125 deletions
This file was deleted.

docs/apis/_index.md

Lines changed: 133 additions & 0 deletions
@@ -0,0 +1,133 @@
---
title: API
description:
weight: 2
tags: ['kafka', 'docs']
aliases:
keywords:
type: docs
---

<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

Kafka includes six core APIs:

1. The Producer API allows applications to send streams of data to topics in the Kafka cluster.
2. The Consumer API allows applications to read streams of data from topics in the Kafka cluster.
3. The Share Consumer API allows applications in a share group to cooperatively consume and process data from Kafka topics.
4. The Streams API allows transforming streams of data from input topics to output topics.
5. The Connect API allows implementing connectors that continually pull from some source system or application into Kafka or push from Kafka into some sink system or application.
6. The Admin API allows managing and inspecting topics, brokers, and other Kafka objects.

Kafka exposes all its functionality over a language-independent protocol that has clients available in many programming languages. However, only the Java clients are maintained as part of the main Kafka project; the others are available as independent open source projects. A list of non-Java clients is available [here](https://cwiki.apache.org/confluence/x/3gDVAQ).

# Producer API

The Producer API allows applications to send streams of data to topics in the Kafka cluster.

Examples of using the producer are shown in the [javadocs](/43/javadoc/index.html?org/apache/kafka/clients/producer/KafkaProducer.html "Kafka 4.3 Javadoc").

To use the producer, add the following Maven dependency to your project:

    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
        <version>4.3.0</version>
    </dependency>
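For orientation, a minimal producer might look like the sketch below; the broker address, topic name, key, and value are placeholders:

    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.serialization.StringSerializer;

    public class ProducerExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

            // try-with-resources closes the producer and flushes pending records.
            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                producer.send(new ProducerRecord<>("my-topic", "key", "value"));
            }
        }
    }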
# Consumer API

The Consumer API allows applications to read streams of data from topics in the Kafka cluster.

Examples of using the consumer are shown in the [javadocs](/43/javadoc/index.html?org/apache/kafka/clients/consumer/KafkaConsumer.html "Kafka 4.3 Javadoc").

To use the consumer, add the following Maven dependency to your project:

    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
        <version>4.3.0</version>
    </dependency>
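A minimal polling loop, again with a placeholder broker address, group id, and topic:

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.StringDeserializer;

    public class ConsumerExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder group
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(List.of("my-topic"));
                while (true) {
                    // Poll for new records and print each key/value pair.
                    for (ConsumerRecord<String, String> record : consumer.poll(Duration.ofMillis(100)))
                        System.out.printf("%s: %s%n", record.key(), record.value());
                }
            }
        }
    }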
# Share Consumer API

The Share Consumer API enables applications in a share group to cooperatively consume and process data from Kafka topics.

Examples of using the share consumer are shown in the [javadocs](/43/javadoc/index.html?org/apache/kafka/clients/consumer/KafkaShareConsumer.html "Kafka 4.3 Javadoc").

To use the share consumer, add the following Maven dependency to your project:

    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
        <version>4.3.0</version>
    </dependency>
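A share consumer is used much like a regular consumer. The sketch below assumes brokers that support share groups, with placeholder configuration values:

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.KafkaShareConsumer;
    import org.apache.kafka.common.serialization.StringDeserializer;

    public class ShareConsumerExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // placeholder broker
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-share-group");      // interpreted as a share group
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

            try (KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props)) {
                consumer.subscribe(List.of("my-topic"));
                while (true) {
                    // Records are shared among all members of the share group.
                    for (ConsumerRecord<String, String> record : consumer.poll(Duration.ofMillis(100)))
                        System.out.printf("%s: %s%n", record.key(), record.value());
                }
            }
        }
    }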
# Streams API

The [Streams](/43/documentation/streams) API allows transforming streams of data from input topics to output topics.

Examples of using this library are shown in the [javadocs](/43/javadoc/index.html?org/apache/kafka/streams/KafkaStreams.html "Kafka 4.3 Javadoc").

Additional documentation on using the Streams API is available [here](/43/documentation/streams).

To use Kafka Streams, add the following Maven dependency to your project:

    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-streams</artifactId>
        <version>4.3.0</version>
    </dependency>

When using Scala you may optionally include the `kafka-streams-scala` library. Additional documentation on using the Kafka Streams DSL for Scala is available [in the developer guide](/43/documentation/streams/developer-guide/dsl-api.html#scala-dsl).

To use Kafka Streams DSL for Scala 2.13, add the following Maven dependency to your project:

    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-streams-scala_2.13</artifactId>
        <version>4.3.0</version>
    </dependency>
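A minimal topology sketch (placeholder application id, broker address, and topic names) that copies an input topic to an output topic with upper-cased values:

    import java.util.Properties;
    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.KafkaStreams;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.StreamsConfig;
    import org.apache.kafka.streams.kstream.KStream;

    public class StreamsExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(StreamsConfig.APPLICATION_ID_CONFIG, "uppercase-example");   // placeholder id
            props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");   // placeholder broker
            props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
            props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

            StreamsBuilder builder = new StreamsBuilder();
            KStream<String, String> input = builder.stream("input-topic");
            // Transform each value and write the result to the output topic.
            input.mapValues(value -> value.toUpperCase()).to("output-topic");

            KafkaStreams streams = new KafkaStreams(builder.build(), props);
            streams.start();
            Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
        }
    }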
# Connect API

The Connect API allows implementing connectors that continually pull from some source data system into Kafka or push from Kafka into some sink data system.

Many users of Connect won't need to use this API directly, though; they can use pre-built connectors without needing to write any code. Additional information on using Connect is available [here](/documentation.html#connect).

Those who want to implement custom connectors can see the [javadoc](/43/javadoc/index.html?org/apache/kafka/connect "Kafka 4.3 Javadoc").
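For those implementing a custom connector, a bare-bones source connector might look like the sketch below. The class names are hypothetical and the task produces no records; a real implementation would poll an external system in `poll()`:

    import java.util.List;
    import java.util.Map;
    import org.apache.kafka.common.config.ConfigDef;
    import org.apache.kafka.connect.connector.Task;
    import org.apache.kafka.connect.source.SourceConnector;
    import org.apache.kafka.connect.source.SourceRecord;
    import org.apache.kafka.connect.source.SourceTask;

    // Hypothetical skeleton of a custom source connector.
    public class ExampleSourceConnector extends SourceConnector {
        private Map<String, String> config;

        @Override public String version() { return "0.1.0"; }

        @Override public void start(Map<String, String> props) { this.config = props; }

        @Override public Class<? extends Task> taskClass() { return ExampleSourceTask.class; }

        @Override public List<Map<String, String>> taskConfigs(int maxTasks) {
            // Single task reusing the connector configuration.
            return List.of(config);
        }

        @Override public void stop() { }

        @Override public ConfigDef config() { return new ConfigDef(); }

        public static class ExampleSourceTask extends SourceTask {
            @Override public String version() { return "0.1.0"; }
            @Override public void start(Map<String, String> props) { }
            @Override public List<SourceRecord> poll() { return List.of(); } // no data in this sketch
            @Override public void stop() { }
        }
    }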
# Admin API

The Admin API supports managing and inspecting topics, brokers, ACLs, and other Kafka objects.

To use the Admin API, add the following Maven dependency to your project:

    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
        <version>4.3.0</version>
    </dependency>

For more information about the Admin APIs, see the [javadoc](/43/javadoc/index.html?org/apache/kafka/clients/admin/Admin.html "Kafka 4.3 Javadoc").
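A short sketch of creating and listing topics with the Admin client; the broker address, topic name, and partition/replication settings are placeholders:

    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.NewTopic;

    public class AdminExample {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker

            try (Admin admin = Admin.create(props)) {
                // Create a topic with 3 partitions and replication factor 1, then list topic names.
                admin.createTopics(List.of(new NewTopic("my-topic", 3, (short) 1))).all().get();
                System.out.println(admin.listTopics().names().get());
            }
        }
    }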
