#
# Copyright 2017 Liu Hongyu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Create an OSEv3 group that contains the masters and nodes groups
[OSEv3:children]
masters
nodes
etcd
# Set variables common for all OSEv3 hosts
[OSEv3:vars]
# SSH user; this user should allow SSH-based auth without requiring a password
ansible_ssh_user=vagrant
# If ansible_ssh_user is not root, ansible_become must be set to true
ansible_become=true
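#
# A quick connectivity check before running the installer (illustrative;
# "ansible-hosts" below refers to this inventory file):
#   ansible -i ansible-hosts OSEv3 -m ping
#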
openshift_deployment_type=origin
openshift_release='{{OPENSHIFT_RELEASE}}'
# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
#openshift_pkg_version=-{{OPENSHIFT_PKG_VERSION}}
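# Illustrative values only (the actual values are substituted at provision time):
#   openshift_release='3.11'
#   openshift_pkg_version=-3.11.0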
# uncomment the following to enable htpasswd authentication; defaults to DenyAllPasswordIdentityProvider
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'{{HTPASSWORD_FILENAME}}}]
# Default login account: admin / handhand
openshift_master_htpasswd_users={'admin': '$apr1$gfaL16Jf$c.5LAvg3xNDVQTkk6HpGB1'}
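# To use a different password, generate a new hash (requires httpd-tools), e.g.:
#   htpasswd -nb admin handhand
# and paste the resulting hash into openshift_master_htpasswd_users above.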
openshift_disable_check=disk_availability,memory_availability,docker_storage,docker_image_availability
openshift_docker_options=" --selinux-enabled --log-driver=journald --storage-driver=overlay --registry-mirror=http://4a0fee72.m.daocloud.io "
#
# Author's Note
#
# Disable service catalog and TSB install
# These two components will lead to a failed install during the "Running Verification" tasks (120 tries).
# So far this seems to happen only in China. The workaround is to enable a VPN during the verification.
#
openshift_enable_service_catalog=false
template_service_broker_install=false
# openshift_hosted_manage_registry=false
# OpenShift Router Options
# Router selector (optional)
# Router will only be created if nodes matching this label are present.
# Default value: 'region=infra'
# openshift_router_selector='node-role.kubernetes.io/infra=true'
# openshift_registry_selector='node-role.kubernetes.io/infra=true'
# default subdomain to use for exposed routes
openshift_master_default_subdomain=openshift.example.com
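# With this subdomain, an exposed route typically gets a hostname of the form
# <route-name>-<project>.openshift.example.com, e.g. myapp-myproject.openshift.example.com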
# host group for masters
[masters]
master.example.com openshift_ip={{NETWORK_BASE}}.101 openshift_host={{NETWORK_BASE}}.101 ansible_ssh_private_key_file="/home/vagrant/.ssh/master.key"
# host group for etcd
[etcd]
master.example.com openshift_ip={{NETWORK_BASE}}.101 openshift_host={{NETWORK_BASE}}.101 ansible_ssh_private_key_file="/home/vagrant/.ssh/master.key"
#
# host group for nodes, includes region info
# For openshift_node_labels strategies, the following reference links might be helpful
# to understand why we chose the current solution:
# - https://github.com/openshift/openshift-ansible#setup
# - https://github.com/openshift/openshift-ansible#node-group-definition-and-mapping
# - https://docs.okd.io/3.7/install_config/install/advanced_install.html#configuring-node-host-labels
# - https://docs.okd.io/3.9/install_config/install/advanced_install.html#configuring-node-host-labels
# - https://docs.okd.io/3.10/install/configuring_inventory_file.html#configuring-node-host-labels
#
# The default node selector for
#   release-3.9 (or earlier): 'region=infra'
#   release-3.10: 'node-role.kubernetes.io/infra=true'
#
# Release-3.9 starts enabling the node roles feature. For backward compatibility, we
# override the default values of openshift_router_selector and openshift_registry_selector
# from 'region=infra' to 'node-role.kubernetes.io/infra=true'.
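#
# For illustration only (the actual values are substituted by the Vagrant provisioner):
# on release-3.10+, {{NODE_GROUP_MASTER_INFRA}} likely expands to something like
#   openshift_node_group_name='node-config-master-infra'
# while on release-3.9 it would be a label set such as
#   openshift_node_labels="{'node-role.kubernetes.io/infra': 'true'}"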
#
# @update on 2019/06/27
# Somehow, etcd was forced to be deployed in containerized form, which caused the
# "Wait for control plane pods to appear" issue and ultimately a failed install.
# For more details about this discussion, please visit:
# https://github.com/eliu/openshift-vagrant/issues/10
# Advice was taken from the issue discussion above; thanks to @Voronenko!
# The trick is to add the ansible variables "containerized=false etcd_ip={ip}" to force
# the master node to install etcd the traditional (RPM) way instead of as a container.
#
[nodes]
master.example.com containerized=false etcd_ip={{NETWORK_BASE}}.101 openshift_ip={{NETWORK_BASE}}.101 openshift_host={{NETWORK_BASE}}.101 ansible_ssh_private_key_file="/home/vagrant/.ssh/master.key" openshift_schedulable=true {{NODE_GROUP_MASTER_INFRA}}
node01.example.com openshift_ip={{NETWORK_BASE}}.102 openshift_host={{NETWORK_BASE}}.102 ansible_ssh_private_key_file="/home/vagrant/.ssh/node01.key" openshift_schedulable=true {{NODE_GROUP_COMPUTE}}
node02.example.com openshift_ip={{NETWORK_BASE}}.103 openshift_host={{NETWORK_BASE}}.103 ansible_ssh_private_key_file="/home/vagrant/.ssh/node02.key" openshift_schedulable=true {{NODE_GROUP_COMPUTE}}
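#
# A typical invocation (assuming the openshift-ansible repo is checked out next to
# this inventory; paths may differ in your setup):
#   ansible-playbook -i ansible-hosts openshift-ansible/playbooks/prerequisites.yml
#   ansible-playbook -i ansible-hosts openshift-ansible/playbooks/deploy_cluster.yml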