Kafka - TimeoutError: Request timed out after 30000ms



The Kafka connection times out after 30000 ms. It shows the following error:

{ TimeoutError: Request timed out after 30000ms
at new TimeoutError (/app/node_modules/kafka-node/lib/errors/TimeoutError.js:6:9)
at Timeout.timeoutId._createTimeout [as _onTimeout] (/app/node_modules/kafka-node/lib/kafkaClient.js:1007:14)
at listOnTimeout (internal/timers.js:535:17)
at processTimers (internal/timers.js:479:7) message: 'Request timed out after 30000ms' }
Tue, 22 Oct 2019 10:10:24 GMT kafka-node:KafkaClient broker is now ready
Tue, 22 Oct 2019 10:10:24 GMT kafka-node:KafkaClient kafka-node-client updated internal metadata
Kafka Producer is connected and ready.
----->data PRODUCT_REF_TOKEN { hash:
'0x964f714829cece2c5f57d5c8d677c251eff82f7fba4b5ba27b4bd650da79a954',
success: 'true' }
Tue, 22 Oct 2019 10:10:24 GMT kafka-node:KafkaClient compressing messages if needed
Tue, 22 Oct 2019 10:10:24 GMT kafka-node:KafkaClient kafka-node-client createBroker 127.0.0.1:9092
Tue, 22 Oct 2019 10:10:24 GMT kafka-node:KafkaClient missing apiSupport waiting until broker is ready...
Tue, 22 Oct 2019 10:10:24 GMT kafka-node:KafkaClient waitUntilReady [BrokerWrapper 127.0.0.1:9092 (connected: true) (ready: false) (idle: false) (needAuthentication: false) (authenticated: false)]
Tue, 22 Oct 2019 10:10:24 GMT kafka-node:KafkaClient kafka-node-client socket closed 127.0.0.1:9092 (hadError: true)
Tue, 22 Oct 2019 10:10:25 GMT kafka-node:KafkaClient kafka-node-client reconnecting to 127.0.0.1:9092
Tue, 22 Oct 2019 10:10:25 GMT kafka-node:KafkaClient kafka-node-client createBroker 127.0.0.1:9092
Tue, 22 Oct 2019 10:10:25 GMT kafka-node:KafkaClient kafka-node-client socket closed 127.0.0.1:9092 (hadError: true)
Tue, 22 Oct 2019 10:10:26 GMT kafka-node:KafkaClient kafka-node-client reconnecting to 127.0.0.1:9092
Tue, 22 Oct 2019 10:10:26 GMT kafka-node:KafkaClient kafka-node-client createBroker 127.0.0.1:9092

Below is the docker-compose.yml used for the Kafka setup. Please let me know if any settings or properties need to be changed.

version: "3.5"
services:
  api:
    image: opschain-sapi
    restart: always
    command: ["yarn", "start"]
    ports:
      - ${API_PORT}:80
    env_file:
      - ./truffle/contracts.env
      - ./.env
    external_links:
      - ganachecli-private
      - ganachecli-public
    networks:
      - opschain_network
  graphql-api:
    build:
      context: ./graphql-api
      dockerfile: Dockerfile
    command: npm run dev
    ports:
      - 9007:80
    depends_on:
      - mongodb
      - graphql-api-watch
      - api
    volumes:
      - ./graphql-api/dist:/app/dist:delegated
      - ./graphql-api/src:/app/src:delegated
    environment:
      VIRTUAL_HOST: api.blockchain.docker
      PORT: 80
      OFFCHAIN_DB_URL: mongodb://root:password@mongodb:27017
      OFFCHAIN_DB_NAME: opschain-wallet
      OFFCHAIN_DB_USER_COLLECTION: user
      JWT_PASSWORD: 'supersecret'
      JWT_TOKEN_EXPIRE_TIME: 86400000
      BLOCKCHAIN_API: api
    networks:
      - opschain_network
  graphql-api-watch:
    build:
      context: ./graphql-api
      dockerfile: Dockerfile
    command: npm run watch
    volumes:
      - ./graphql-api/src:/app/src:delegated
      - ./graphql-api/dist:/app/dist:delegated
    networks:
      - opschain_network
  mongodb:
    image: mongo:latest
    ports:
      - 27017:27017
    environment:
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: password
      MONGO_INITDB_DATABASE: opschain-wallet
    logging:
      options:
        max-size: 100m
    networks:
      - opschain_network
  ui:
    build:
      context: ./ui
      dockerfile: Dockerfile
    ports:
      - 9000:3000
    volumes:
      - ./ui/public:/app/public:delegated
      - ./ui/src:/app/src:delegated
    depends_on:
      - graphql-api
    networks:
      - opschain_network
    environment:
      VIRTUAL_HOST: tmna.csc.docker
      REACT_APP_API_BASE_URL: http://localhost:8080
    logging:
      options:
        max-size: 10m
  test:
    build: ./test
    volumes:
      - ./test/postman:/app/postman:delegated
    networks:
      - opschain_network
  zoo1:
    image: zookeeper:3.4.9
    hostname: zoo1
    ports:
      - 2181:2181
    environment:
      ZOO_MY_ID: 1
      ZOO_PORT: 2181
      ZOO_SERVERS: server.1=zoo1:2888:3888
    volumes:
      - ./pub-sub/zk-single-kafka-single/zoo1/data:/data
      - ./pub-sub/zk-single-kafka-single/zoo1/datalog:/datalog
    networks:
      - opschain_network
  kafka1:
    image: confluentinc/cp-kafka:5.3.1
    hostname: kafka1
    ports:
      - 9092:9092
    environment:
      KAFKA_ADVERTISED_LISTENERS: LISTENER_DOCKER_INTERNAL://kafka1:19092,LISTENER_DOCKER_EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: LISTENER_DOCKER_INTERNAL:PLAINTEXT,LISTENER_DOCKER_EXTERNAL:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: LISTENER_DOCKER_INTERNAL
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181"
      KAFKA_BROKER_ID: 1
      KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      # KAFKA_ADVERTISED_HOST_NAME: localhost
      # KAFKA_ZOOKEEPER_CONNECT: zoo1:2181
      KAFKA_CREATE_TOPICS: "cat:1:1"
    volumes:
      - ./pub-sub/zk-single-kafka-single/kafka1/data:/var/lib/kafka/data
    depends_on:
      - zoo1
      - api
    networks:
      - opschain_network

networks:
  opschain_network:
    external: true

In the compose file above I have exposed the Kafka port 9092 and the ZooKeeper port 2181. I am not sure exactly what the problem is.
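One likely culprit, judging from the compose file and the debug log: the broker advertises LISTENER_DOCKER_EXTERNAL://127.0.0.1:9092, and inside the api container 127.0.0.1 refers to the api container itself rather than the broker, which matches the repeated "createBroker 127.0.0.1:9092 ... socket closed (hadError: true)" lines. Below is a minimal connectivity check of my own (not from the question); it assumes it runs inside the api container on opschain_network and uses the internal listener address from the compose file.

// Hypothetical connectivity check, assumed to run inside the api container.
// 'kafka1:19092' is the LISTENER_DOCKER_INTERNAL address from the compose file;
// '127.0.0.1:9092' would resolve to the api container itself and time out.
const kafka = require('kafka-node');

const client = new kafka.KafkaClient({ kafkaHost: 'kafka1:19092' });

client.on('ready', () => {
  // 'cat' is the topic name listed in KAFKA_CREATE_TOPICS in the compose file.
  client.loadMetadataForTopics(['cat'], (err, meta) => {
    console.log('metadata result:', err || JSON.stringify(meta));
    client.close();
  });
});

client.on('error', err => console.error('client error:', err));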

const kafka = require('kafka-node');
const config = require('./configUtils');

function sendMessage({ topic, message }) {
  const Producer = kafka.Producer;
  const client = new kafka.KafkaClient({ kafkaHost: config.kafka.host, autoConnect: true });
  const producer = new Producer(client);

  producer.on('ready', () => {
    console.log('Kafka Producer is connected and ready.');
    console.log('----->data', topic, message);
    producer.send(
      [
        {
          topic,
          messages: [JSON.stringify(message)],
        },
      ],
      function (_err, data) {
        console.log('--err', _err);
        console.log('------->message sent from kafka', data);
      }
    );
  });

  producer.on('error', error => {
    console.error(error);
  });
}

module.exports = sendMessage;

This is the producer file; it connects the Kafka client and produces the message once the producer is ready.
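For reference, the "----->data PRODUCT_REF_TOKEN { hash: ..., success: 'true' }" line in the log above suggests the function is invoked roughly as sketched below. The actual call site is not shown in the question, so the module path and the exact message shape are assumptions reconstructed from that log line.

// Hypothetical call site, reconstructed from the '----->data PRODUCT_REF_TOKEN' log output.
const sendMessage = require('./sendMessage'); // assumed filename for the producer module above

sendMessage({
  topic: 'PRODUCT_REF_TOKEN',
  message: {
    hash: '0x964f714829cece2c5f57d5c8d677c251eff82f7fba4b5ba27b4bd650da79a954',
    success: 'true',
  },
});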

I ran into a similar issue when using the landoop/fast-data-dev image with docker-compose. I was able to resolve it by making sure the ADV_HOST environment variable was set to the name of the Kafka service (e.g. kafka1), and then setting the kafkaHost option to that service name (e.g. kafka1:9092).

For the Kafka image used here, the corresponding environment variable appears to be KAFKA_ADVERTISED_HOST_NAME.
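Applied to the setup in the question, a minimal sketch of that change on the client side: either have the broker advertise the service name (so the external advertised listener points at kafka1:9092 instead of 127.0.0.1:9092), or leave the compose file as-is and point config.kafka.host at the internal listener kafka1:19092. The host values below are assumptions based on the compose file above, not something tested against this exact setup.

// Sketch of the client/producer pointed at the Kafka service by name.
// 'kafka1:9092' assumes the advertised (external) listener now advertises kafka1;
// with the compose file unchanged, 'kafka1:19092' (the internal listener) should work instead.
const kafka = require('kafka-node');

const client = new kafka.KafkaClient({ kafkaHost: 'kafka1:9092', autoConnect: true });
const producer = new kafka.Producer(client);

producer.on('ready', () => console.log('Producer reached the broker via the service name.'));
producer.on('error', err => console.error(err));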
