Mount dynamically created PV to multiple containers on the same pod



I am working on a use case where I need to add a new container to the JupyterHub pod; this new (sidecar) container watches the JupyterHub directory.

The JupyterHub container gets a dynamically provisioned PV at spawn time; see the section below:

singleuser:
  podNameTemplate:
  extraTolerations: []
  nodeSelector: {}
  extraNodeAffinity:
    required: []
    preferred: []
  extraPodAffinity:
    required: []
    preferred: []
  extraPodAntiAffinity:
    required: []
    preferred: []
  networkTools:
    image:
      name: jupyterhub/k8s-network-tools
      tag: "set-by-chartpress"
      pullPolicy:
      pullSecrets: []
    resources: {}
  cloudMetadata:
    # block set to true will append a privileged initContainer using the
    # iptables to block the sensitive metadata server at the provided ip.
    blockWithIptables: true
    ip: 169.254.169.254
  networkPolicy:
    enabled: true
    ingress: []
    egress:
      # Required egress to communicate with the hub and DNS servers will be
      # augmented to these egress rules.
      #
      # This default rule explicitly allows all outbound traffic from singleuser
      # pods, except to a typical IP used to return metadata that can be used by
      # someone with malicious intent.
      - to:
          - ipBlock:
              cidr: 0.0.0.0/0
              except:
                - 169.254.169.254/32
    interNamespaceAccessLabels: ignore
    allowedIngressPorts: []
  events: true
  extraAnnotations: {}
  extraLabels:
    hub.jupyter.org/network-access-hub: "true"
  extraFiles: {}
  extraEnv: {}
  lifecycleHooks: {}
  initContainers: []
  extraContainers: []
  uid: 1000
  fsGid: 100
  serviceAccountName:
  storage:
    type: dynamic
    extraLabels: {}
    extraVolumes: []
    extraVolumeMounts: []
    static:
      pvcName:
      subPath: "{username}"
    capacity: 10Gi
    homeMountPath: /home/jovyan
    dynamic:
      storageClass:
      pvcNameTemplate: claim-{username}{servername}
      volumeNameTemplate: volume-{username}{servername}
      storageAccessModes: [ReadWriteOnce]
  image:
    name: jupyterhub/k8s-singleuser-sample
    tag: "set-by-chartpress"
    pullPolicy:
    pullSecrets: []
  startTimeout: 300
  cpu:
    limit:
    guarantee:
  memory:
    limit:
    guarantee: 1G
  extraResource:
    limits: {}
    guarantees: {}
  cmd:
  defaultUrl:
  extraPodConfig: {}
  profileList: []

I have added my new container in the extraContainers section of the deployment file. My container does start, but the dynamic PV is not mounted on that container.
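
When this happens it is worth checking what actually landed in the generated pod, for example with kubectl get pod <user-pod> -o yaml. The sketch below shows what a provisioned-but-unmounted dynamic PV typically looks like in that output; the pod, user, and volume names are illustrative, assuming the chart's default templates (pod jupyter-{username}, volume volume-{username}{servername}, claim claim-{username}{servername}):

# Illustrative fragment of `kubectl get pod jupyter-alice -o yaml` output;
# all names here are hypothetical examples, not taken from the real cluster.
spec:
  containers:
    - name: notebook
      volumeMounts:
        - name: volume-alice          # the dynamic PV is mounted here...
          mountPath: /home/alice
    - name: dind
      volumeMounts:                   # ...but absent from the sidecar,
        - name: dind-storage          # which only mounts its own volumes
          mountPath: /var/lib/docker
  volumes:
    - name: volume-alice              # the volume itself is pod-wide
      persistentVolumeClaim:
        claimName: claim-alice
    - name: dind-storage
      emptyDir: {}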

Is the use case I am trying to achieve technically feasible at the Kubernetes level?

The full yaml file is available here for reference: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/main/jupyterhub/values.yaml

The configmap, for reference:

singleuser:
  baseUrl: /
  cloudMetadata:
    enabled: true
    ip: xx.xx.xx.xx
  cpu: {}
  events: true
  extraAnnotations: {}
  extraConfigFiles:
    config_files:
      - cm_key: ''
        content: ''
        file_path: ''
      - cm_key: ''
        content: ''
        file_path: ''
    enabled: false
  extraContainers:
    - image: 'docker:19.03-rc-dind'
      lifecycle:
        postStart:
          exec:
            command:
              - sh
              - '-c'
              - update-ca-certificates; echo Certificates Updated
      name: dind
      securityContext:
        privileged: true
      volumeMounts:
        - mountPath: /var/lib/docker
          name: dind-storage
        - mountPath: /usr/local/share/ca-certificates/
          name: docker-cert
  extraEnv:
    ACTUAL_HADOOP_CONF_DIR: ''
    ACTUAL_HIVE_CONF_DIR: ''
    ACTUAL_SPARK_CONF_DIR: ''
    CDH_PARCEL_DIR: ''
    DOCKER_HOST: ''
    JAVA_HOME:
    LIVY_URL: ''
    SPARK2_PARCEL_DIR: ''
  extraLabels:
    hub.jupyter.org/network-access-hub: 'true'
  extraNodeAffinity:
    preferred: []
    required: []
  extraPodAffinity:
    preferred: []
    required: []
  extraPodAntiAffinity:
    preferred: []
    required: []
  extraPodConfig: {}
  extraResource:
    guarantees: {}
    limits: {}
  extraTolerations: []
  fsGid: 0
  image:
    name: >-
      /jupyterhub/jpt-spark-magic
    pullPolicy: IfNotPresent
    tag: xxx
  imagePullSecret:
    email: null
    enabled: false
    registry: null
    username: null
  initContainers: []
  lifecycleHooks: {}
  memory:
    guarantee: 1G
  networkPolicy:
    egress:
      - to:
          - ipBlock:
              cidr: 0.0.0.0/0
              except:
                - 169.254.169.254/32
    enabled: false
    ingress: []
  networkTools:
    image:
      name: >-
        /k8s-hub-multispawn
      pullPolicy: IfNotPresent
      tag: '12345'
  nodeSelector: {}
  profileList:
    - description: Python for data enthusiasts
      display_name: 0
      kubespawner_override:
        cmd:
          - jpt-entry-cmd.sh
        cpu_limit: 1
        environment:
          XYZ_SERVICE_URL: 'http://XYZ:8080'
          CURL_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
          DOCKER_HOST: 'tcp://localhost:2375'
          HADOOP_CONF_DIR: /etc/hadoop/conf
          HADOOP_HOME: /usr/hdp/3.1.5.6091-7/hadoop/
          HDP_DIR: /usr/hdp/3.1.5.6091-7
          HDP_HOME_DIR: /usr/hdp/3.1.5.6091-7
          HDP_VERSION: 3.1.5.6091-7
          HIVE_CONF_DIR: /usr/hdp/3.1.5.6091-7/hive
          HIVE_HOME: /usr/hdp/3.1.5.6091-7/hive
          INTEGRATION_ENV: HDP3
          JAVA_HOME: /usr/jdk64/jdk1.8.0_112
          LD_LIBRARY_PATH: >-
            /usr/hdp/3.1.5.6091-7/hadoop/lib/native:/usr/jdk64/jdk1.8.0_112/jre:/usr/hdp/3.1.5.6091-7/usr/lib/:/usr/hdp/3.1.5.6091-7/usr/lib/
          LIVY_URL: 'http://ammaster01.fake.org:8999'
          MLFLOW_TRACKING_URI: 'http://mlflow:5100'
          NO_PROXY: mlflow
          SPARK_CONF_DIR: /etc/spark2/conf
          SPARK_HOME: /usr/hdp/3.1.5.6091-7/spark2
          SPARK2_PARCEL_DIR: /usr/hdp/3.1.5.6091-7/spark2
          TOOLS_BASE_PATH: /usr/local/bin
        image: >-
          /jupyterhub/jpt-spark-magic:1.1.2
        mem_limit: 4096M
        uid: 0
    - description: R for data enthusiasts
      display_name: 1
      kubespawner_override:
        cmd:
          - start-all.sh
        environment:
          XYZ_SERVICE_URL: 'http://XYZ-service:8080'
          DISABLE_AUTH: 'true'
          XYZ: /home/rstudio/kitematic
        image: '/jupyterhub/rstudio:364094'
        uid: 0
    - description: Python for data enthusiasts test2
      display_name: 2
      kubespawner_override:
        cmd:
          - jpt-entry-cmd.sh
        cpu_limit: 4
        environment:
          XYZ_SERVICE_URL: 'http://XYZ-service:8080'
          CURL_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
          DOCKER_HOST: 'tcp://localhost:2375'
          HADOOP_CONF_DIR: /etc/hadoop/conf
          HADOOP_HOME: /usr/hdp/3.1.5.6091-7/hadoop/
          HDP_DIR: /usr/hdp/3.1.5.6091-7
          HDP_HOME_DIR: /usr/hdp/3.1.5.6091-7
          HDP_VERSION: 3.1.5.6091-7
          HIVE_CONF_DIR: /usr/hdp/3.1.5.6091-7/hive
          HIVE_HOME: /usr/hdp/3.1.5.6091-7/hive
          INTEGRATION_ENV: HDP3
          JAVA_HOME: /usr/jdk64/jdk1.8.0_112
          LD_LIBRARY_PATH: >-
            /usr/hdp/3.1.5.6091-7/hadoop/lib/native:/usr/jdk64/jdk1.8.0_112/jre:/usr/hdp/3.1.5.6091-7/usr/lib/:/usr/hdp/3.1.5.6091-7/usr/lib/
          LIVY_URL: 'http://xyz:8999'
          MLFLOW_TRACKING_URI: 'http://mlflow:5100'
          NO_PROXY: mlflow
          SPARK_CONF_DIR: /etc/spark2/conf
          SPARK_HOME: /usr/hdp/3.1.5.6091-7/spark2
          SPARK2_PARCEL_DIR: /usr/hdp/3.1.5.6091-7/spark2
          TOOLS_BASE_PATH: /usr/local/bin
        image: >-
          /jupyterhub/jpt-spark-magic:1.1.2
        mem_limit: 8192M
        uid: 0
    - description: Python for data enthusiasts test3
      display_name: 3
      kubespawner_override:
        cmd:
          - jpt-entry-cmd.sh
        cpu_limit: 8
        environment:
          XYZ_SERVICE_URL: 'http://XYZ-service:8080'
          CURL_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
          DOCKER_HOST: 'tcp://localhost:2375'
          HADOOP_CONF_DIR: /etc/hadoop/conf
          HADOOP_HOME: /usr/hdp/3.1.5.6091-7/hadoop/
          HDP_DIR: /usr/hdp/3.1.5.6091-7
          HDP_HOME_DIR: /usr/hdp/3.1.5.6091-7
          HDP_VERSION: 3.1.5.6091-7
          HIVE_CONF_DIR: /usr/hdp/3.1.5.6091-7/hive
          HIVE_HOME: /usr/hdp/3.1.5.6091-7/hive
          INTEGRATION_ENV: HDP3
          JAVA_HOME: /usr/jdk64/jdk1.8.0_112
          LD_LIBRARY_PATH: >-
            /usr/hdp/3.1.5.6091-7/hadoop/lib/native:/usr/jdk64/jdk1.8.0_112/jre:/usr/hdp/3.1.5.6091-7/usr/lib/:/usr/hdp/3.1.5.6091-7/usr/lib/
          LIVY_URL: 'http://fake.org:8999'
          MLFLOW_TRACKING_URI: 'http://mlflow:5100'
          NO_PROXY: mlflow
          SPARK_CONF_DIR: /etc/spark2/conf
          SPARK_HOME: /usr/hdp/3.1.5.6091-7/spark2
          SPARK2_PARCEL_DIR: /usr/hdp/3.1.5.6091-7/spark2
          TOOLS_BASE_PATH: /usr/local/bin
        image: >-
          /jupyterhub/jpt-spark-magic:1.1.2
        mem_limit: 16384M
        uid: 0
  startTimeout: 300
  storage:
    capacity: 10Gi
    dynamic:
      pvcNameTemplate: 'claim-{username}{servername}'
      storageAccessModes:
        - ReadWriteOnce
      storageClass: nfs-client
      volumeNameTemplate: 'volume-{username}{servername}'
    extraLabels: {}
    extraVolumeMounts:
      - mountPath: /etc/krb5.conf
        name: krb
        readOnly: true
      - mountPath: /usr/jdk64/jdk1.8.0_112
        name: java-home
        readOnly: true
      - mountPath: /xyz/conda/envs
        name: xyz-conda-envs
        readOnly: false
      - mountPath: /usr/hdp/
        name: bigdata
        readOnly: true
        subPath: usr-hdp
      - mountPath: /etc/hadoop/
        name: bigdata
        readOnly: true
        subPath: HDP
      - mountPath: /etc/hive/
        name: bigdata
        readOnly: true
        subPath: hdp-hive
      - mountPath: /etc/spark2/
        name: bigdata
        readOnly: true
        subPath: hdp-spark2
    extraVolumes:
      - emptyDir: {}
        name: dind-storage
      - name: docker-cert
        secret:
          secretName: docker-cert
      - hostPath:
          path: /var/lib/ut_xyz_ts/jdk1.8.0_112
          type: Directory
        name: java-home
      - hostPath:
          path: /xyz/conda/envs
          type: Directory
        name: xyz-conda-envs
      - hostPath:
          path: /etc/krb5.conf
          type: File
        name: krb
      - name: bigdata
        persistentVolumeClaim:
          claimName: bigdata
    homeMountPath: '/home/{username}'
    static:
      subPath: '{username}'
    type: dynamic
  uid: 0

Thanks in advance.

To your question of whether two or more containers on the same pod can technically share the same volume: the answer is yes. See here: https://youtu.be/GQJP9QdHHs8?t=82.
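
For illustration, a minimal sketch of a plain Kubernetes pod in which two containers mount the same PVC-backed volume; all names here (shared-volume-demo, claim-alice, watcher, the busybox image) are hypothetical:

# Two containers in one pod sharing a single PVC-backed volume.
apiVersion: v1
kind: Pod
metadata:
  name: shared-volume-demo
spec:
  volumes:
    - name: shared-home
      persistentVolumeClaim:
        claimName: claim-alice        # any existing PVC
  containers:
    - name: main
      image: busybox:1.36
      command: ["sh", "-c", "while true; do sleep 3600; done"]
      volumeMounts:
        - name: shared-home
          mountPath: /home/jovyan
    - name: watcher                   # the sidecar sees the same files
      image: busybox:1.36
      command: ["sh", "-c", "while true; do ls -l /watch; sleep 30; done"]
      volumeMounts:
        - name: shared-home
          mountPath: /watch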

But you should also define a volumeMount for that volume in your extra container spec (see the example in the video, and the sketch below). If you can check that, or share the output of kubectl describe deployment <your-deployment>, I can confirm.
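
Applied to the config above, that would mean adding a volumeMounts entry to the dind container that references the dynamically created home volume. This is a sketch only, assuming the volume is added to the pod under the name produced by storage.dynamic.volumeNameTemplate and that your KubeSpawner version expands {username}/{servername} inside extraContainers; both are worth verifying against your chart version:

singleuser:
  extraContainers:
    - name: dind
      image: 'docker:19.03-rc-dind'
      securityContext:
        privileged: true
      volumeMounts:
        - mountPath: /var/lib/docker
          name: dind-storage
        - mountPath: /usr/local/share/ca-certificates/
          name: docker-cert
        # Assumption: this name must match what volumeNameTemplate produces
        # for the spawned pod, i.e. the same volume the notebook container
        # mounts at homeMountPath (/home/{username} in this config).
        - mountPath: /home/{username}
          name: volume-{username}{servername}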
