MongoDB Apache Kafka sink connector - Partitioning

Hi,
I'm using the docker-compose file from Getting Started with the MongoDB Kafka Sink Connector. It looks like this:

services:
  zookeeper:
    image: confluentinc/cp-zookeeper:7.2.2
    hostname: zookeeper
    container_name: zookeeper
    networks:
      - localnet
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      KAFKA_JMX_PORT: 35000

  broker:
    image: confluentinc/cp-kafka:7.2.2
    hostname: broker
    container_name: broker
    ports:
      - "9092:9092"
    depends_on:
      - zookeeper
    networks:
      - localnet
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_LISTENERS: LISTENER_1://broker:29092,LISTENER_2://broker:9092
      KAFKA_ADVERTISED_LISTENERS: LISTENER_1://broker:29092,LISTENER_2://localhost:9092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: LISTENER_1:PLAINTEXT,LISTENER_2:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: LISTENER_1
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      CONFLUENT_SUPPORT_CUSTOMER_ID: "anonymous"
      KAFKA_DELETE_TOPIC_ENABLE: "true"
      KAFKA_JMX_PORT: 35000
      
  schema-registry:
    image: confluentinc/cp-schema-registry:7.2.2
    hostname: schema-registry
    container_name: schema-registry
    depends_on:
      - broker
    networks:
      - localnet
    environment:
      SCHEMA_REGISTRY_HOST_NAME: schema-registry
      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: "broker:29092"
      SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081    
          
  connect:
    build:
      context: .
      dockerfile: connect.Dockerfile
    ports:
      - "35000:35000"
      - "8083:8083"
    hostname: connect
    container_name: connect
    depends_on:
      - zookeeper
      - broker
    networks:
      - localnet
    environment:
      KAFKA_JMX_PORT: 35000
      KAFKA_JMX_HOSTNAME: localhost
      CONNECT_BOOTSTRAP_SERVERS: "broker:29092"
      CONNECT_REST_ADVERTISED_HOST_NAME: connect
      CONNECT_GROUP_ID: connect-cluster-group
      CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs
      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000
      CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets
      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status
      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_ZOOKEEPER_CONNECT: "zookeeper:2181"
      CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
      CONNECT_CONNECTIONS_MAX_IDLE_MS: 180000
      CONNECT_METADATA_MAX_AGE_MS: 180000
      CONNECT_AUTO_CREATE_TOPICS_ENABLE: "true"
      CONNECT_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"

  mongo1:
    image: mongo:6.0.5
    container_name: mongo1
    command: --replSet rs0 --oplogSize 128
    ports:
      - "35001:27017"
    depends_on:
      - zookeeper
      - broker
      - connect
    networks:
      - localnet
    restart: always

  mongo1-setup:
    image: mongo:6.0.5
    container_name: mongo1-setup
    volumes: 
      - ./config-replica.js:/config-replica.js
    depends_on:
      - mongo1
    networks:
      - localnet
    entrypoint:
      [
        "bash",
        "-c",
        "sleep 10 && mongosh --host mongo1:27017 config-replica.js && sleep 10",
      ]
    restart: "no"
    
    
  mongo-express:
    image: mongo-express
    restart: always
    ports:
      - 8081:8081
    environment:
      ME_CONFIG_MONGODB_SERVER: mongo1
      ME_CONFIG_MONGODB_PORT: 27017
      ME_CONFIG_OPTIONS_EDITORTHEME: "default"
    networks:
      - localnet
    depends_on:
      - mongo1 

networks:
  localnet:
    attachable: true

I would like to know: is there any configuration to divide an Apache Kafka topic into partitions? I have been looking for one but I can't find anything.
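
For reference, the closest thing I have found is the broker-level num.partitions setting, which controls the default partition count for automatically created topics. A minimal sketch of what I have in mind, assuming the Confluent cp-kafka image maps KAFKA_* environment variables onto broker properties (so KAFKA_NUM_PARTITIONS would become num.partitions; the partition count of 3 is just an example), would be adding this to the broker service:

  broker:
    image: confluentinc/cp-kafka:7.2.2
    environment:
      # ... existing settings as above ...
      # Assumption: translated by the image to num.partitions, the default
      # number of partitions used whenever a topic is auto-created.
      KAFKA_NUM_PARTITIONS: 3

Alternatively, I assume I could create the topic explicitly with the kafka-topics tool inside the broker container before the connector starts consuming it (for example: kafka-topics --bootstrap-server broker:29092 --create --topic my-topic --partitions 3), but I would prefer a compose- or connector-level configuration if one exists. Is either of these the right approach?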