Thank you for reading this post, don't forget to subscribe!
Ставим GitLab Helm chart в AWS EKS.
aws/infra/rds.tf
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 |
# Manages PostgreSQL objects (roles, databases) inside the RDS instance.
# Connects as the RDS master user over TLS using the "awspostgres" scheme.
provider "postgresql" {
  host            = aws_db_instance.infra-db.address
  port            = aws_db_instance.infra-db.port
  database        = "postgres"
  username        = aws_db_instance.infra-db.username
  # The RDS master user is not a true PostgreSQL superuser, so the provider
  # must not assume superuser privileges.
  superuser       = false
  password        = data.aws_secretsmanager_secret_version.rds_admin_password_version.secret_string
  sslmode         = "require"
  connect_timeout = 15
  scheme          = "awspostgres"
}

# Per-application DB passwords are stored as a JSON map in Secrets Manager
# under "infra/rds_db_users" (key = role name, value = password).
data "aws_secretsmanager_secret" "infra_rds_db_users" {
  name     = "infra/rds_db_users"
  provider = aws
}

data "aws_secretsmanager_secret_version" "infra_rds_db_users-version" {
  secret_id = data.aws_secretsmanager_secret.infra_rds_db_users.id
  provider  = aws
}

# Login role for GitLab; password is taken from the Secrets Manager JSON map.
resource "postgresql_role" "gitlab" {
  name     = "gitlab"
  login    = true
  password = jsondecode(data.aws_secretsmanager_secret_version.infra_rds_db_users-version.secret_string)["gitlab"]
}

# Application database owned by the role above.
resource "postgresql_database" "gitlab" {
  name              = "gitlab"
  owner             = "gitlab"
  allow_connections = true
}
Пользователь и пароль добавлены в AWS Secrets Manager.
Добавляем два S3-бакета: один для артефактов, другой для бэкапов.
aws/infra/s3.tf
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 |
# Entries of the bucket map passed to the S3 module (the enclosing map opens
# outside this snippet; the final "}" below closes it).
# Both buckets enforce bucket-owner object ownership and deny non-TLS access.
test-infra-gitlab = {
  object_ownership = "BucketOwnerEnforced"
  owner = {
    id = data.aws_canonical_user_id.current.id
  }
  attach_policy = true
  # Deny any request arriving over plain HTTP (aws:SecureTransport == false),
  # for the bucket itself and every object in it.
  policy = jsonencode({
    Version : "2012-10-17",
    Statement : [
      {
        Sid : "AllowSSLRequestsOnly",
        Effect : "Deny",
        Principal : {
          AWS : "*"
        },
        Action : [
          "s3:*"
        ],
        Resource : [
          "arn:aws:s3:::test-infra-gitlab/*",
          "arn:aws:s3:::test-infra-gitlab"
        ]
        Condition : {
          Bool : {
            "aws:SecureTransport" : "false"
          }
        }
      }
    ]
  })
  bucket_tags = {
    Name = "test-infra-gitlab"
  }
}
# Separate bucket for backups with the same TLS-only policy.
test-infra-gitlab-backup = {
  object_ownership = "BucketOwnerEnforced"
  owner = {
    id = data.aws_canonical_user_id.current.id
  }
  attach_policy = true
  policy = jsonencode({
    Version : "2012-10-17",
    Statement : [
      {
        Sid : "AllowSSLRequestsOnly",
        Effect : "Deny",
        Principal : {
          AWS : "*"
        },
        Action : [
          "s3:*"
        ],
        Resource : [
          "arn:aws:s3:::test-infra-gitlab-backup/*",
          "arn:aws:s3:::test-infra-gitlab-backup"
        ]
        Condition : {
          Bool : {
            "aws:SecureTransport" : "false"
          }
        }
      }
    ]
  })
  bucket_tags = {
    Name = "test-infra-gitlab-backup"
  }
}
}
создаём сервис аккаунт
aws/infra/aws-iam-access-roles.tf
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 |
# IRSA entry for the "gitlab" Kubernetes service account: grants object CRUD
# and bucket listing on both GitLab buckets (artifacts/uploads and backups).
# The enclosing map of service accounts is defined outside this snippet.
gitlab = {
  service_account = {
    name = "gitlab"
    policy = jsonencode({
      Version = "2012-10-17",
      Statement = [
        {
          Effect = "Allow",
          Action = [
            "s3:PutObject",
            "s3:GetObject",
            "s3:DeleteObject",
            "s3:ListBucket"
          ],
          "Resource" : [
            "arn:aws:s3:::test-infra-gitlab/*",
            "arn:aws:s3:::test-infra-gitlab",
            "arn:aws:s3:::test-infra-gitlab-backup/*",
            "arn:aws:s3:::test-infra-gitlab-backup"
          ]
        },
      ],
    })
  }
}
Открываем порт на Ingress для SSH — я выбрал порт 2222.
aws/infra/eks.tf
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 |
# Internal ingress-nginx controller behind an internal NLB (proxy protocol on).
# Also exposes TCP 2222 and forwards it to gitlab-shell for Git-over-SSH.
resource "helm_release" "nginx-ingress-controller-int" {
  name             = "ingress-nginx-int"
  repository       = "https://kubernetes.github.io/ingress-nginx"
  chart            = "ingress-nginx"
  version          = local.settings.eks.ingress_nginx_version
  namespace        = "${local.settings.eks.ingress_nginx_ns}-int"
  create_namespace = true
  timeout          = 900

  values = [<<EOF
controller:
  kind: Deployment
  replicaCount: 2
  resources:
    limits:
      cpu: 2000m
      memory: 2Gi
    requests:
      cpu: 100m
      memory: 256Mi
  # Spread the two replicas across nodes so one node failure keeps ingress up.
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: "kubernetes.io/hostname"
      whenUnsatisfiable: DoNotSchedule
      labelSelector:
        matchLabels:
          app.kubernetes.io/name: ingress-nginx
  # Dedicated ingress class so this controller does not clash with others.
  ingressClassResource:
    controllerValue: "k8s.io/ingress-nginx-int"
    default: false
    enabled: true
    name: "nginx-int"
  metrics:
    enabled: true
  service:
    enabled: true
    external:
      enabled: true
    annotations:
      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp"
      service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "60"
      service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true"
      service.beta.kubernetes.io/aws-load-balancer-type: "nlb-ip"
      service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: "ip"
      service.beta.kubernetes.io/aws-load-balancer-scheme: "internal"
      service.beta.kubernetes.io/aws-load-balancer-subnets: "${module.vpc.subnet_private_01_id}, ${module.vpc.subnet_private_02_id}, ${module.vpc.subnet_private_03_id}"
      service.beta.kubernetes.io/aws-load-balancer-name: "${module.eks.cluster_name}-int"
      # Proxy protocol preserves client IPs through the NLB; must match
      # use-proxy-protocol below and the PROXY suffix in the tcp mapping.
      service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
      nginx.ingress.kubernetes.io/proxy-body-size: "5G"
  config:
    disable-ipv6: "true"
    disable-ipv6-dns: "true"
    enable-access-log-for-default-backend: "false"
    http2-max-field-size: 8k
    keep-alive: "65"
    large-client-header-buffers: 16 64k
    limit-conn-status-code: "429"
    limit-req-status-code: "429"
    load-balance: ewma
    proxy-body-size: 5G
    error-log-level: error
    log-format-escape-json: "true"
    # FIX: "connections_active" previously mapped to the literal string
    # "connections_active" (missing "$"), logging a constant instead of the
    # nginx $connections_active variable.
    log-format-upstream: '{"bytes_sent": "$bytes_sent", "vhost": "$host", "request_proto": "$server_protocol", "remote_addr": "$remote_addr", "proxy_add_x_forwarded_for": "$proxy_add_x_forwarded_for", "remote_user": "$remote_user", "time_local": "$time_local", "request_method": "$request_method", "request_uri": "$uri", "request_args": "$args", "request" : "$request", "status": "$status", "body_bytes_sent": "$body_bytes_sent", "http_referer": "$http_referer", "http_user_agent": "$http_user_agent", "request_length": "$request_length", "request_time" : "$request_time", "upstream_addr": "$upstream_addr", "upstream_response_length": "$upstream_response_length", "upstream_response_time": "$upstream_response_time", "upstream_status": "$upstream_status", "X-Business-Error": "$upstream_http_x_business_error", "upstream_header_time": "$upstream_header_time", "upstream_connect_time": "$upstream_connect_time","connections_waiting": "$connections_waiting", "connections_active": "$connections_active"}'
    map-hash-bucket-size: "128"
    proxy-next-upstream: error timeout http_500 http_502 http_503 http_504
    server-tokens: "false"
    ssl-protocols: TLSv1.2 TLSv1.3
    ssl-session-cache: "true"
    ssl-session-cache-size: 20m
    ssl-session-timeout: 30m
    use-forwarded-headers: "true"
    use-gzip: "true"
    use-proxy-protocol: "true"
    worker-cpu-affinity: auto
    worker-processes: "2"
# TCP passthrough: SSH (2222) to the gitlab-shell service, proxy protocol on.
tcp:
  2222: "gitlab/gitlab-gitlab-shell:2222:PROXY"
EOF
  ]

  depends_on = [
    module.eks
  ]
  provider = helm
}
создаём почтовый сервер
aws/infra/ses.tf
1 2 3 4 5 6 7 8 9 10 |
# Registers the sending domain with SES (identity must be verified via DNS).
resource "aws_ses_domain_identity" "test_tech" {
  domain = local.settings.dns.tech_domain
}

# Enables DKIM signing for the verified domain; the generated DKIM tokens
# must be published as DNS records separately.
resource "aws_ses_domain_dkim" "test_tech" {
  domain = aws_ses_domain_identity.test_tech.domain
}

# IAM access/secret keys for SMTP are created ONLY manually via the AWS web
# console: SES SMTP credentials look like ordinary IAM keys but are not.
Логин и пароль создаются вручную в панели AWS: хоть они и выглядят как обычные ключи IAM-пользователя — это не так.
aws/infra/main.tf
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 |
### Providers ###

### Main AWS provider ###
provider "aws" {
  region = local.settings.vpc.region
}

data "aws_caller_identity" "current" {
  provider = aws
}

########################## Local settings load ######################################
# All environment settings live in variables/infra.yaml and are exposed as
# local.settings to the rest of the module.
locals {
  vars     = file("variables/infra.yaml")
  settings = yamldecode(local.vars)
}

### K8S ###
provider "kubernetes" {
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    args        = ["eks", "get-token", "--cluster-name", module.eks.cluster_name, "--region", local.settings.vpc.region]
    command     = "aws"
  }
}

provider "kubectl" {
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    args        = ["eks", "get-token", "--cluster-name", module.eks.cluster_name, "--region", local.settings.vpc.region]
    command     = "aws"
  }
}

### Helm ###
provider "helm" {
  kubernetes {
    host                   = module.eks.cluster_endpoint
    cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
    exec {
      api_version = "client.authentication.k8s.io/v1beta1"
      # FIX: "--region" was missing here while the kubernetes/kubectl providers
      # pass it; without it `aws eks get-token` falls back to the ambient AWS
      # region and fails when that differs from the cluster's region.
      args    = ["eks", "get-token", "--cluster-name", module.eks.cluster_name, "--region", local.settings.vpc.region]
      command = "aws"
    }
  }
  experiments {
    manifest = true
  }
}
вот тут задаём переменные
aws/infra/variables/infra.yaml
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 |
# Environment settings loaded by main.tf (yamldecode into local.settings).
eks:
  eks_cluster_name: "infra"
  eks_cluster_version: "1.31"
  aws_lb_controller_chart_version: "1.6.1"
  ingress_nginx_version: "4.8.0"
  ingress_nginx_ns: "web-ingress"
  node_key_name: "worker"
  main_node_group_desired_size: "2"
  karpenter_namespace: "karpenter"
  karpenter_service_account: "karpenter"
  karpenter_helm_version: "0.37.0"
  keda_namespace: "keda"
  keda_helm_version: "v2.12.0"
  harbor:
    namespace: "harbor"
    helm_version: "1.14.2"
    dns_name: "registry"
    tls_secret: "harbor-tls"
    creds_secret: "harbor-creds"
    s3_user: "harbor-s3-accessor"
    # NOTE(review): "resistry" looks like a typo for "registry", but this is a
    # live bucket name — confirm the actual bucket before renaming anything.
    s3_bucket: "test-infra-harbor-resistry-store"
    database: "harbor"
    db_username: "harbor"
  gitlab:
    namespace: "gitlab"
    helm_version: "8.9.2"
    tls_secret: "gitlab-tls"
    dns_name: "gitlab"
    runner_helm_version: "0.74.1"
    gitlab_s3_bucket: "test-infra-gitlab"
certs:
  cert_manager_namespace: "cert-manager"
  prod_issuer_name: "letsencrypt-prod"
dns:
  base_domain: "infra"
  tech_domain: "test.tech"
  cf_token_secret_name: "cloudflare-token-secret"
template для гитлаба
aws/infra/templates/gitlab.yaml.tmpl
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 |
# Values template for the GitLab Helm chart, rendered by Terraform's
# templatefile(); every ${...} is a Terraform template variable, not Helm.
# The chart's bundled nginx ingress is disabled — the external ingress-nginx
# controller (class "nginx-int") terminates traffic instead.
nginx-ingress:
  enabled: false

global:
  hosts:
    # Domain GitLab will be served on
    domain: "${domain}"
  ingress:
    enabled: true
    configureCertmanager: false
    provider: nginx
    class: "${ingress_class}"
    annotations:
      nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    tls:
      secretName: "${gitlab_tls_secret}"
      hosts:
        - gitlab."${domain}"
  # Global nodeSelector applied to all components
  nodeSelector:
    dedication: "service"
    provisioning: "karpenter"
  psql:
    main:
      # Connection settings of the external database for the main GitLab part
      host: "${db_host}"
      port: 5432
      username: "${db_username}"
      password:
        secret: "${db_password_secret}"
        key: "${db_password_key}"
      database: "${db_database}"
    ci:
      # Connection settings for the CI database
      host: "${db_host}"
      port: 5432
      username: "${db_username}"
      password:
        secret: "${db_password_secret}"
        key: "${db_password_key}"
      database: "${db_database}"
  # Object storage is S3, so the bundled MinIO is disabled.
  minio:
    enabled: false
  serviceAccount:
    enabled: true
    annotations:
      eks.amazonaws.com/role-arn: "${service_account}"
  email:
    display_name: 'GitLab'
    from: "gitlab@${base_domain}"
    reply_to: "noreply@${base_domain}"
  smtp:
    enabled: true
    address: "${smtp_host}"
    port: 587
    domain: "${base_domain}"
    user_name: "${smtp_user_name}"
    password:
      # Name of the secret holding the SMTP password.
      # NOTE(review): "smtp_passwort_secret" is misspelled but matches the
      # variable name passed from Terraform — rename in both places or neither.
      secret: "${smtp_passwort_secret}"
      key: "${smtp_password_key}"
    authentication: "login" # usually "login" or "plain"
    starttls_auto: true
  registry:
    enabled: false
  shell:
    port: 2222
    # The same port is also opened on the ingress controller:
    #   tcp: 2222: "gitlab/gitlab-gitlab-shell:2222:PROXY"
    tcp:
      proxyProtocol: false
  appConfig:
    lfs:
      bucket: "${s3_bucket}"
      path: "lfs"
      connection:
        secret: "${s3_connections_secret}"
        key: "${s3_connections_key}"
    artifacts:
      bucket: "${s3_bucket}"
      path: "artifacts"
      connection:
        secret: "${s3_connections_secret}"
        key: "${s3_connections_key}"
    uploads:
      bucket: "${s3_bucket}"
      path: "uploads"
      connection:
        secret: "${s3_connections_secret}"
        key: "${s3_connections_key}"
    packages:
      bucket: "${s3_bucket}"
      path: "packages"
      connection:
        secret: "${s3_connections_secret}"
        key: "${s3_connections_key}"
    backups:
      bucket: "${gitlab_backup_s3_bucket}"
      path: "backups"
      tmpBucket: "${gitlab_backup_s3_bucket}"
      tmpPath: "tmp"
    # Google sign-in
    omniauth:
      enabled: true
      allowSingleSignOn: ['google_oauth2']
      syncProfileAttributes: ['email']
      autoLinkSamlUser: true
      blockAutoCreatedUsers: false
      autoLinkUser: ['google_oauth2']
      providers:
        - secret: "${google_auth_secret}"
          key: "${google_auth_key}"

redis:
  install: true
  master:
    resources:
      requests:
        cpu: "${redis_cpu_request}"
        memory: "${redis_memory_request}"
      limits:
        cpu: "${redis_cpu_limit}"
        memory: "${redis_memory_limit}"
    persistence:
      enabled: true
      storageClass: gp3
      size: "${redis_disk_size}"

# Disable bundled certmanager (an external cert-manager issues the TLS secret)
certmanager:
  installCRDs: false
  install: false

# Disable bundled PostgreSQL installation, an external database is used
postgresql:
  install: false

# Registry – Docker image storage (if used)
registry:
  enabled: false

# Prometheus – GitLab monitoring (if installed together with GitLab)
prometheus:
  install: false

# Core GitLab components (Rails/Sidekiq/Webservice)
gitlab:
  webservice:
    ingress:
      proxyBodySize: "5000m"
      proxyConnectTimeout: "8000"
      proxyReadTimeout: "8000"
    resources:
      requests:
        cpu: "${webservice_cpu_request}"
        memory: "${webservice_memory_request}"
      limits:
        cpu: "${webservice_cpu_limit}"
        memory: "${webservice_memory_limit}"
  sidekiq:
    concurrency: "20"
    maxReplicas: "40"
    resources:
      requests:
        cpu: "${sidekiq_cpu_request}"
        memory: "${sidekiq_memory_request}"
      limits:
        cpu: "${sidekiq_cpu_limit}"
        memory: "${sidekiq_memory_limit}"
    hpa:
      cpu:
        targetType: Value
        targetAverageValue: 350m
  toolbox:
    resources:
      requests:
        cpu: "${toolbox_cpu_request}"
        memory: "${toolbox_memory_request}"
      limits:
        cpu: "${toolbox_cpu_limit}"
        memory: "${toolbox_memory_limit}"
    backups:
      cron:
        enabled: "true"
        schedule: "@daily"
        # Backup pods run on spot nodes provisioned by Karpenter.
        nodeSelector:
          dedication: "services-spot"
          provisioning: "karpenter"
        tolerations:
          - key: "dedicated"
            operator: "Equal"
            value: "services-spot"
            effect: "NoSchedule"
        resources:
          requests:
            cpu: 1000m
            memory: 1000M
        persistence:
          enabled: "true"
          accessMode: ReadWriteOnce
          useGenericEphemeralVolume: false
          storageClass: "gp3"
          size: 30Gi
      objectStorage:
        config:
          secret: "${s3_connections_secret}"
          key: "${s3_connections_key}"
  # Gitaly – stores GitLab's Git repositories
  gitaly:
    resources:
      requests:
        cpu: "${gitaly_cpu_request}"
        memory: "${gitaly_memory_request}"
      limits:
        cpu: "${gitaly_cpu_limit}"
        memory: "${gitaly_memory_limit}"
    persistence:
      enabled: true
      storageClass: gp3
      size: "${gitaly_disk_size}"
  gitlab-shell:
    replicaCount: 2
    service:
      enabled: true
      type: ClusterIP
      port: 2222
    resources:
      requests:
        cpu: "${gitlab_shell_cpu_request}"
        memory: "${gitlab_shell_memory_request}"
      limits:
        cpu: "${gitlab_shell_cpu_limit}"
        memory: "${gitlab_shell_memory_limit}"
  kas:
    replicaCount: 2
    resources:
      requests:
        cpu: "${kas_cpu_request}"
        memory: "${kas_memory_request}"
      limits:
        cpu: "${kas_cpu_limit}"
        memory: "${kas_memory_limit}"
  gitlab-exporter:
    resources:
      requests:
        cpu: "${gitlab_exporter_cpu_request}"
        memory: "${gitlab_exporter_memory_request}"
      limits:
        cpu: "${gitlab_exporter_cpu_limit}"
        memory: "${gitlab_exporter_memory_limit}"

# The runner is installed by a separate helm_release.
gitlab-runner:
  install: false
для gitlab-runner
aws/infra/templates/gitlab-runner-self-hosted.yaml.tmpl
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 |
# Values template for the gitlab-runner Helm chart, rendered by Terraform's
# templatefile(); every ${...} is a Terraform template variable.
imagePullPolicy: ${runner_image_pull_policy}
gitlabUrl: ${runner_gitlab_url}
runnerRegistrationToken: ${runner_registration_token}
concurrent: ${runner_concurrent}
checkInterval: ${runner_check_interval}
terminationGracePeriodSeconds: ${runner_termination_grace_period_seconds}
rbac:
  create: ${runner_rbac_create}
  rules:
    # FIX: the first rule was missing apiGroups while the second declared
    # apiGroups: [""] — a resource PolicyRule without apiGroups is invalid,
    # so the generated Role would be rejected for these core resources.
    - apiGroups: [""]
      resources: ["configmaps", "pods", "pods/attach", "secrets", "services"]
      verbs: ["get", "list", "watch", "create", "patch", "update", "delete"]
    - apiGroups: [""]
      resources: ["pods/exec"]
      verbs: ["create", "patch", "delete"]
  serviceAccountName: runner-gitlab-runner
metrics:
  enabled: ${runner_metrics_enabled}
  serviceMonitor:
    enabled: true
    interval: 15s
    labels:
      release: kube-prometheus-stack
service:
  enabled: true
nodeSelector:
  dedication: "service"
  provisioning: "karpenter"
serviceAccount:
  create: true
  name: runner-gitlab-runner
runners:
  serviceAccountName: runner-gitlab-runner
  config: |
    [[runners]]
      [runners.kubernetes]
        namespace = "{{.Release.Namespace}}"
        image = "${runner_image_name}"
        image_pull_secrets = ["${harbor_docker_image_pull_secret}"]
        # build container
        cpu_limit = "${runner_build_cpu_limit}"
        memory_limit = "${runner_build_memory_limit}"
        cpu_limit_overwrite_max_allowed = "${runner_build_cpu_limit}"
        memory_limit_overwrite_max_allowed = "${runner_build_memory_limit}"
        # service containers
        service_cpu_limit = "${runner_service_cpu_limit}"
        service_memory_limit = "${runner_service_memory_limit}"
        service_cpu_limit_overwrite_max_allowed = "${runner_service_cpu_limit}"
        service_memory_limit_overwrite_max_allowed = "${runner_service_memory_limit}"
        # helper container
        helper_cpu_limit = "${runner_helper_cpu_limit}"
        helper_memory_limit = "${runner_helper_memory_limit}"
        helper_cpu_limit_overwrite_max_allowed = "${runner_helper_cpu_limit}"
        helper_memory_limit_overwrite_max_allowed = "${runner_helper_memory_limit}"
        service_account = "runner-gitlab-runner"
        # Mount the Harbor docker auth config into job pods.
        [[runners.kubernetes.volumes.config_map]]
          name = "${harbor_docker_config_name}"
          mount_path = "/root/.docker/config.json"
          sub_path = "config.json"
        [runners.kubernetes.node_selector]
          "provisioning" = "${runner_provisioning}"
        [runners.kubernetes.node_tolerations]
          "dedicated=infra" = "NoSchedule"
  image: ${runner_image_name}
  # NOTE(review): lowercase "always" is the runner's pull_policy spelling, not
  # the Kubernetes "Always" — confirm this key maps to runner config here.
  imagePullPolicy: "always"
  privileged: ${runner_privileged}
  tags: ${runner_tags}
  runUntagged: ${runner_run_untagged}
resources:
  limits:
    memory: 350Mi
    cpu: 400m
    ephemeral-storage: 600Mi
  requests:
    memory: 150Mi
    cpu: 200m
    ephemeral-storage: 300Mi
все компоненты для гитлаба:
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 |
# Namespace for all GitLab components.
resource "kubernetes_namespace" "gitlab" {
  metadata {
    annotations = {
      name = local.settings.eks.gitlab.namespace
    }
    name = local.settings.eks.gitlab.namespace
  }
  provider = kubernetes
}

### TLS certificates for Gitlab ###
resource "kubernetes_manifest" "gitlab_tls" {
  manifest = yamldecode(<<EOF
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: "${local.settings.eks.gitlab.tls_secret}"
  namespace: "${local.settings.eks.gitlab.namespace}"
spec:
  dnsNames:
    - "${local.settings.eks.gitlab.dns_name}.${local.settings.dns.base_domain}.${local.settings.dns.tech_domain}"
    - "www.${local.settings.eks.gitlab.dns_name}.${local.settings.dns.base_domain}.${local.settings.dns.tech_domain}"
  secretName: "${local.settings.eks.gitlab.tls_secret}"
  issuerRef:
    name: letsencrypt-prod
    kind: ClusterIssuer
EOF
  )
  provider = kubernetes
}

data "aws_secretsmanager_secret" "gitlab_db_password" {
  name = "infra/rds_db_users"
}

data "aws_secretsmanager_secret_version" "gitlab_db_password" {
  secret_id = data.aws_secretsmanager_secret.gitlab_db_password.id
}

# DB password for GitLab, copied from Secrets Manager into a k8s secret.
resource "kubernetes_secret" "gitlab_db_secret" {
  metadata {
    name      = "gitlab-db-password"
    namespace = kubernetes_namespace.gitlab.id
  }
  data = {
    # NOTE(review): this reads the "infra_rds_db_users-version" data source
    # declared in rds.tf, not the gitlab_db_password data source above —
    # confirm whether the latter is still needed.
    db-gitlab-password = lookup(jsondecode(data.aws_secretsmanager_secret_version.infra_rds_db_users-version.secret_string), "gitlab", "")
  }
  provider = kubernetes
}

# S3 connection settings consumed by the chart's object-storage config;
# credentials come from the IRSA role (use_iam_profile), not static keys.
resource "kubernetes_secret" "gitlab_s3_connections_info" {
  metadata {
    name      = "gitlab-s3-connections-info"
    namespace = local.settings.eks.gitlab.namespace
  }
  data = {
    connection = jsonencode({
      provider        = "AWS",
      region          = "eu-central-1",
      host            = "s3.eu-central-1.amazonaws.com",
      use_iam_profile = true
    })
  }
  type = "Opaque"
}

data "aws_secretsmanager_secret" "gitlab_secret" {
  name = "infra/gitlab"
}

data "aws_secretsmanager_secret_version" "gitlab_secret" {
  secret_id = data.aws_secretsmanager_secret.gitlab_secret.id
}

# Google OAuth provider config for omniauth, assembled from Secrets Manager.
resource "kubernetes_secret" "gitlab_secret" {
  metadata {
    name      = "gitlab-google-auth"
    namespace = kubernetes_namespace.gitlab.id
  }
  data = {
    connection = jsonencode({
      name         = lookup(jsondecode(data.aws_secretsmanager_secret_version.gitlab_secret.secret_string), "name", "")
      provider     = lookup(jsondecode(data.aws_secretsmanager_secret_version.gitlab_secret.secret_string), "provider", "")
      app_id       = lookup(jsondecode(data.aws_secretsmanager_secret_version.gitlab_secret.secret_string), "client-id", "")
      app_secret   = lookup(jsondecode(data.aws_secretsmanager_secret_version.gitlab_secret.secret_string), "client-secret", "")
      callback_url = lookup(jsondecode(data.aws_secretsmanager_secret_version.gitlab_secret.secret_string), "callback_url", "")
      scope        = lookup(jsondecode(data.aws_secretsmanager_secret_version.gitlab_secret.secret_string), "scope", "")
      access_type  = lookup(jsondecode(data.aws_secretsmanager_secret_version.gitlab_secret.secret_string), "access_type", "")
      prompt       = lookup(jsondecode(data.aws_secretsmanager_secret_version.gitlab_secret.secret_string), "prompt", "")
    })
  }
  provider = kubernetes
}

resource "kubernetes_secret" "gitlab_smtp_pass" {
  metadata {
    name      = "gitlab-smtp-pass"
    namespace = kubernetes_namespace.gitlab.id
  }
  data = {
    pass = lookup(jsondecode(data.aws_secretsmanager_secret_version.gitlab_secret.secret_string), "SMTP_PASSWORD", "")
  }
  provider = kubernetes
}

# The GitLab chart itself; all environment-specific knobs are injected into
# templates/gitlab.yaml.tmpl.
resource "helm_release" "gitlab" {
  name             = "gitlab"
  repository       = "https://charts.gitlab.io/"
  chart            = "gitlab"
  version          = local.settings.eks.gitlab.helm_version
  namespace        = local.settings.eks.gitlab.namespace
  create_namespace = false
  max_history      = "5"
  timeout          = 900
  values = [templatefile("${path.module}/templates/gitlab.yaml.tmpl", {
    domain               = "${local.settings.dns.base_domain}.${local.settings.dns.tech_domain}"
    base_domain          = "${local.settings.dns.tech_domain}"
    ingress_class        = "nginx-int"
    gitlab_tls_secret    = "${local.settings.eks.gitlab.tls_secret}"
    service_account      = "${module.aws_service_accounts["gitlab"].role_arn}"
    db_host              = "${aws_db_instance.infra-db.address}"
    db_username          = "${postgresql_role.gitlab.name}"
    db_database          = "${postgresql_database.gitlab.name}"
    db_password_secret   = "${kubernetes_secret.gitlab_db_secret.metadata[0].name}"
    db_password_key      = "${keys(kubernetes_secret.gitlab_db_secret.data)[0]}"
    smtp_host            = lookup(jsondecode(data.aws_secretsmanager_secret_version.gitlab_secret.secret_string), "SMTP_HOST", "")
    smtp_user_name       = lookup(jsondecode(data.aws_secretsmanager_secret_version.gitlab_secret.secret_string), "SMTP_USER", "")
    smtp_passwort_secret = "${kubernetes_secret.gitlab_smtp_pass.metadata[0].name}"
    smtp_password_key    = "${keys(kubernetes_secret.gitlab_smtp_pass.data)[0]}"
    s3_bucket            = "${local.settings.eks.gitlab.gitlab_s3_bucket}"
    s3_connections_secret = "${kubernetes_secret.gitlab_s3_connections_info.metadata[0].name}"
    s3_connections_key   = "${keys(kubernetes_secret.gitlab_s3_connections_info.data)[0]}"
    google_auth_secret   = "${kubernetes_secret.gitlab_secret.metadata[0].name}"
    google_auth_key      = "${keys(kubernetes_secret.gitlab_secret.data)[0]}"
    webservice_cpu_request    = "500m"
    webservice_cpu_limit      = "1"
    webservice_memory_request = "2Gi"
    webservice_memory_limit   = "4Gi"
    sidekiq_cpu_request       = "200m"
    sidekiq_cpu_limit         = "1500m"
    sidekiq_memory_request    = "2Gi"
    sidekiq_memory_limit      = "4Gi"
    toolbox_cpu_request       = "100m"
    toolbox_cpu_limit         = "500m"
    toolbox_memory_request    = "300Mi"
    toolbox_memory_limit      = "1Gi"
    gitaly_cpu_request        = "500m"
    gitaly_cpu_limit          = "1"
    gitaly_memory_request     = "1Gi"
    gitaly_memory_limit       = "2Gi"
    gitaly_disk_size          = "40Gi"
    redis_cpu_request         = "100m"
    redis_cpu_limit           = "500m"
    redis_memory_request      = "100Mi"
    redis_memory_limit        = "300Mi"
    redis_disk_size           = "5Gi"
    gitlab_shell_cpu_request    = "100m"
    gitlab_shell_cpu_limit      = "500m"
    gitlab_shell_memory_request = "100Mi"
    gitlab_shell_memory_limit   = "300Mi"
    kas_cpu_request             = "100m"
    kas_cpu_limit               = "200m"
    kas_memory_request          = "100Mi"
    kas_memory_limit            = "200Mi"
    gitlab_exporter_cpu_request    = "100m"
    gitlab_exporter_cpu_limit      = "500m"
    gitlab_exporter_memory_request = "100Mi"
    gitlab_exporter_memory_limit   = "200Mi"
    gitlab_backup_s3_bucket        = "test-infra-gitlab-backup"
  })]
}

# Harbor docker auth config, mounted into runner job pods as config.json.
resource "kubernetes_config_map" "harbor_config" {
  metadata {
    name      = "autentification-to-harbor"
    namespace = local.settings.eks.gitlab.namespace
  }
  data = {
    "config.json" = templatefile("${path.module}/templates/harbor_docker_config.tmpl", {
      dns_name    = "${local.settings.eks.harbor.dns_name}"
      base_domain = "${local.settings.dns.base_domain}"
      tech_domain = "${local.settings.dns.tech_domain}"
      auth        = "${base64encode(join(":", ["harbor@gitlab-runner", resource.random_password.gitlab_runner_password.result]))}"
    })
  }
}

# Same credentials as an imagePullSecret for pulling job images from Harbor.
resource "kubernetes_secret" "harbor_secret" {
  metadata {
    name      = "autentification-to-harbor"
    namespace = local.settings.eks.gitlab.namespace
  }
  data = {
    ".dockerconfigjson" = templatefile("${path.module}/templates/harbor_docker_config.tmpl", {
      dns_name    = "${local.settings.eks.harbor.dns_name}"
      base_domain = "${local.settings.dns.base_domain}"
      tech_domain = "${local.settings.dns.tech_domain}"
      auth        = "${base64encode(join(":", ["harbor@gitlab-runner", resource.random_password.gitlab_runner_password.result]))}"
    })
  }
  type = "kubernetes.io/dockerconfigjson"
}

resource "helm_release" "gitlab_runner" {
  name             = "gitlab-runner"
  repository       = "https://charts.gitlab.io/"
  chart            = "gitlab-runner"
  version          = local.settings.eks.gitlab.runner_helm_version
  namespace        = local.settings.eks.gitlab.namespace
  create_namespace = false
  timeout          = 900
  values = [templatefile("${path.module}/templates/gitlab-runner-self-hosted.yaml.tmpl", {
    runner_image_pull_policy  = "Always"
    runner_gitlab_url         = "https://gitlab.${local.settings.dns.base_domain}.${local.settings.dns.tech_domain}"
    runner_registration_token = lookup(jsondecode(data.aws_secretsmanager_secret_version.gitlab_secret.secret_string), "runner-token", "")
    runner_concurrent         = "20"
    runner_check_interval     = "1"
    runner_termination_grace_period_seconds = "3600"
    runner_rbac_create        = "true"
    runner_metrics_enabled    = "true"
    runner_image_name         = "ubuntu:22.04"
    runner_build_cpu_limit    = "7"
    runner_build_memory_limit = "25000Mi"
    runner_service_cpu_limit  = "1"
    runner_service_memory_limit = "2000Mi"
    runner_helper_cpu_limit   = "1"
    runner_helper_memory_limit = "1024Mi"
    runner_privileged         = "true"
    runner_tags               = "test"
    runner_run_untagged       = "true"
    runner_provisioning       = "karpenter-gitlab-runner"
    runner_namespace          = local.settings.eks.gitlab.namespace
    # FIX: these referenced kubernetes_config_map.harbor_docker_config and
    # kubernetes_secret.harbor_docker_config, but the resources above are
    # named harbor_config / harbor_secret — the old references do not exist
    # and fail at plan time.
    harbor_docker_config_name       = "${kubernetes_config_map.harbor_config.metadata[0].name}"
    harbor_docker_image_pull_secret = "${kubernetes_secret.harbor_secret.metadata[0].name}"
  })]
}

######################## backup ################
# IRSA role assumed by the gitlab-backup service account via the cluster OIDC
# provider.
resource "aws_iam_role" "gitlab_backup_role" {
  name               = "gitlab-backup-iam-role"
  assume_role_policy = <<POLICY
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Federated": "arn:aws:iam::${data.aws_caller_identity.current.account_id}:oidc-provider/${module.eks.oidc_provider}"
      },
      "Action": "sts:AssumeRoleWithWebIdentity",
      "Condition": {
        "StringEquals": {
          "${module.eks.oidc_provider}:aud": "sts.amazonaws.com",
          "${module.eks.oidc_provider}:sub": "system:serviceaccount:gitlab:gitlab-backup"
        }
      }
    }
  ]
}
POLICY
}

resource "aws_iam_policy" "gitlab_backup_s3_access_iam_policy" {
  name        = "gitlab_backup_s3_access_iam_policy"
  description = "Policy for gitlab-backup to access S3"
  policy      = <<EOF
{
  "Version" : "2012-10-17",
  "Statement" : [
    {
      "Sid": "AllowgitlabbackupToChangeObjects",
      "Effect" : "Allow",
      "Action" : [
        "s3:*"
      ],
      "Resource" : [
        "arn:aws:s3:::test-infra-gitlab-backup",
        "arn:aws:s3:::test-infra-gitlab-backup/*"
      ]
    }
  ]
}
EOF
}

resource "aws_iam_role_policy_attachment" "backup_policy_attach" {
  policy_arn = aws_iam_policy.gitlab_backup_s3_access_iam_policy.arn
  role       = aws_iam_role.gitlab_backup_role.name
}

resource "kubectl_manifest" "backup_sa" {
  yaml_body = <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: gitlab-backup
  namespace: "${local.settings.eks.gitlab.namespace}"
  annotations:
    eks.amazonaws.com/role-arn: ${aws_iam_role.gitlab_backup_role.arn}
EOF
  provider  = kubectl
}

# Repo-backup CronJob (image built from the Dockerfile/app.py below).
resource "kubectl_manifest" "gitlab_backup_cronjob" {
  yaml_body = <<EOF
apiVersion: batch/v1
kind: CronJob
metadata:
  name: gitlab-backup
  namespace: "${local.settings.eks.gitlab.namespace}"
spec:
  suspend: false
  schedule: "0 */12 * * *"
  failedJobsHistoryLimit: 1
  successfulJobsHistoryLimit: 1
  jobTemplate:
    spec:
      template:
        spec:
          serviceAccountName: gitlab-backup
          restartPolicy: OnFailure
          containers:
            - image: registry.infra.test.tech/common/gitlab-backup:latest
              name: gitlab-backup
              imagePullPolicy: Always
              env:
                - name: GITLAB_URL
                  value: "gitlab.${local.settings.dns.base_domain}.${local.settings.dns.tech_domain}"
                # NOTE(review): the token and webhook URL are rendered as
                # plain env values in the manifest — consider secretKeyRef.
                - name: GITLAB_TOKEN
                  value: "${jsondecode(data.aws_secretsmanager_secret_version.gitlab_secret.secret_string)["GITLAB_BACKUP_TOKEN"]}"
                - name: S3_BUCKET
                  value: "test-infra-gitlab-backup"
                - name: S3_PREFIX
                  value: gitlab-backup-only-repo
                - name: S3_FILES_AMOUNT
                  value: "90"
                - name: SLACK_WEBHOOK_URL
                  value: "${jsondecode(data.aws_secretsmanager_secret_version.gitlab_secret.secret_string)["SLACK_WEBHOOK_URL"]}"
EOF
  provider  = kubectl
}

# NOTE(review): this 14-day expiry also applies to the cron-job archives above,
# while the job itself tries to keep 90 files — the lifecycle rule wins.
resource "aws_s3_bucket_lifecycle_configuration" "backup_gitlab" {
  bucket = "test-infra-gitlab-backup"
  rule {
    id = "rule-clear-gitlab-backup"
    expiration {
      days = 14
    }
    status = "Enabled"
  }
}

####################################
############ backup EBS ############
####################################
resource "aws_dlm_lifecycle_policy" "gitlab_pvc_snapshot" {
  description        = "Daily snapshot for GitLab PVC and retention of 14 days"
  execution_role_arn = aws_iam_role.dlm_role.arn
  state              = "ENABLED"
  policy_details {
    resource_types = ["VOLUME"]
    # Targets the Gitaly data volume by its PVC tag.
    target_tags = {
      "kubernetes.io/created-for/pvc/name" = "repo-data-gitlab-gitaly-0"
    }
    schedule {
      name = "daily-gitlab-pvc-snapshot"
      create_rule {
        interval      = 24
        interval_unit = "HOURS"
        times         = ["21:00"]
      }
      retain_rule {
        count = 14 # keep the 14 most recent snapshots
      }
      copy_tags = true
    }
  }
}

resource "aws_iam_role" "dlm_role" {
  name               = "gitlab-ebs-snapshots"
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect = "Allow"
      Principal = {
        Service = "dlm.amazonaws.com"
      }
      Action = "sts:AssumeRole"
    }]
  })
}

resource "aws_iam_role_policy_attachment" "dlm_role_policy" {
  role       = aws_iam_role.dlm_role.name
  policy_arn = "arn:aws:iam::aws:policy/service-role/AWSDataLifecycleManagerServiceRole"
}
я использую harbor в качестве registry в котором у меня хранятся образы
Также запускаю отдельную CronJob, которая создаёт бэкапы проектов: я собираю image, который создаёт архив с бэкапами и складывает его в S3.
вот этот скрипт:
Dockerfile
1 2 3 4 5 |
FROM python:3.11
WORKDIR /app
# Copy only the requirements first so the pip layer is cached across source
# changes; --no-cache-dir keeps the image smaller.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . /app
ENTRYPOINT [ "python3", "/app/app.py" ]
.env_example
1 2 3 4 5 6 |
# Example environment for the GitLab backup job (copy and fill in).
# FIX: removed spaces around "=" — docker --env-file and shell sourcing do not
# accept "KEY = value". Added GITLAB_URL, which app.py reads.
GITLAB_TOKEN=""
GITLAB_URL=""
S3_BUCKET=""
S3_PREFIX=""
S3_FILES_AMOUNT=30 # The number of files saved in the repository
SLACK_WEBHOOK_URL=""
DEBUG="false"
app.py
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 |
import os
import sys
import datetime
import tarfile
import logging

import boto3
import gitlab
from git import Repo
from slack_sdk.webhook import WebhookClient

logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)

# All configuration comes from the environment (see .env_example).
GITLAB_TOKEN = os.getenv('GITLAB_TOKEN')
GITLAB_URL = os.getenv('GITLAB_URL', 'gitlab.com')
gitBasePathRelative = os.getenv('BASE_PATH', default="repos/")
S3_BUCKET = os.getenv('S3_BUCKET')
S3_PREFIX = os.getenv('S3_PREFIX')
S3_FILES_AMOUNT = int(os.getenv('S3_FILES_AMOUNT', default=60))
SLACK_WEBHOOK_URL = os.getenv('SLACK_WEBHOOK_URL')
DEBUG = os.getenv('DEBUG', default="false")

gl = gitlab.Gitlab(url=f'https://{GITLAB_URL}', private_token=GITLAB_TOKEN)


def send_slack_message(text):
    """Post ``text`` to the Slack webhook; suppressed when DEBUG == "true"."""
    webhook = WebhookClient(SLACK_WEBHOOK_URL)
    if DEBUG != "true":
        webhook.send(text=text)


def make_tarfile(output_filename, source_dir):
    """Pack ``source_dir`` into a gzip-compressed tarball ``output_filename``."""
    with tarfile.open(output_filename, "w:gz") as tar:
        tar.add(source_dir, arcname=os.path.basename(source_dir))


def get_sorted_file_from_s3(bucket, prefix):
    """Return all objects directly under ``prefix/`` in ``bucket``, oldest first.

    Each element is the boto3 object dict (has "Key" and "LastModified").
    """
    client = boto3.client("s3")
    # list_objects_v2 is the recommended replacement for the legacy
    # list_objects API; pagination semantics are the same for our use.
    paginator = client.get_paginator("list_objects_v2")
    result_files = []
    for result in paginator.paginate(Bucket=bucket, Delimiter="/", Prefix=f"{prefix}/"):
        result_files.extend(result.get("Contents") or [])
    # datetime.timestamp() is portable; strftime("%s") is a glibc-only extension.
    return sorted(result_files, key=lambda obj: obj["LastModified"].timestamp())


# Clone every owned, non-archived project. Failures are best-effort:
# log + Slack-notify and continue with the next project.
for p in gl.projects.list(archived=0, owned=True, iterator=True):
    try:
        logging.info(f"Cloning project {p.name}")
        pathToFolder = gitBasePathRelative + p.namespace['full_path'] + "/" + p.name
        if not os.path.exists(pathToFolder):
            os.makedirs(pathToFolder)
        Repo.clone_from(
            f"https://gitlab-ci-token:{GITLAB_TOKEN}@{GITLAB_URL}/{p.path_with_namespace}.git",
            pathToFolder,
        )
    except Exception as error:
        logging.info(f"An exception occurred: {error}")
        send_slack_message(f"Gitlab backup cant clone project: {p.path_with_namespace} Error: {error}")

# Compress all cloned repositories into a single timestamped archive.
logging.info(f"Compress projects")
try:
    date = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
    make_tarfile(f"{date}.tar.gz", gitBasePathRelative)
except Exception as error:
    logging.info(f"An exception occurred: {error}")
    send_slack_message(f"Gitlab backup cant compress projects. Error: {error}")
    sys.exit(1)

# Upload the archive to S3.
logging.info(f"Push to s3")
try:
    s3 = boto3.resource('s3')
    s3.Bucket(S3_BUCKET).upload_file(f"{date}.tar.gz", f"{S3_PREFIX}/{date}.tar.gz")
except Exception as error:
    logging.info(f"An exception occurred: {error}")
    send_slack_message(f"Gitlab backup cant push projects to s3. Error: {error}")
    sys.exit(1)

# Delete the oldest archives so at most S3_FILES_AMOUNT remain.
logging.info(f"Clear old s3 files")
try:
    s3files = get_sorted_file_from_s3(S3_BUCKET, S3_PREFIX)
    if len(s3files) > S3_FILES_AMOUNT:
        for i in range(0, len(s3files) - S3_FILES_AMOUNT):
            key = s3files[i]["Key"]
            logging.info(f"Delete {key}")
            s3.Object(S3_BUCKET, key).delete()
except Exception as error:
    logging.info(f"An exception occurred: {error}")
    send_slack_message(f"Gitlab backup cant clear old s3 files. Error: {error}")
    sys.exit(1)
для успешного запуска этого скрипта нужно создать access-token и поместить его в aws secret manager под именем GITLAB_BACKUP_TOKEN
также я делаю снапшоты EBS-диска, на котором хранится gitaly; делаю это через aws_dlm_lifecycle_policy
можно для этого использовать и backup сервис
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 |
# Alternative to DLM: AWS Backup for the Gitaly EBS volume.

# Encrypted vault that stores the recovery points.
resource "aws_backup_vault" "gitlab" {
  name        = "gitlab-backup"
  kms_key_arn = "arn:aws:kms:eu-central-1:096495940642:key/006ca35a-519d-47bd-9c3a-edbcddae2f04"
  tags = {
    Name = "gitlab-backup"
  }
}

# Daily plan at 21:00 (AWS Backup cron); recovery points are deleted after 14 days.
resource "aws_backup_plan" "gitlab" {
  name = "gitlab-backup"

  rule {
    rule_name         = "gitlab-backup"
    target_vault_name = aws_backup_vault.gitlab.name
    schedule          = "cron(00 21 * * ? *)"
    lifecycle {
      delete_after = 14
    }
  }
}

# Select volumes by the PVC tag created for the Gitaly data volume.
resource "aws_backup_selection" "gitlab_backup_selection" {
  name         = "gitlab-ebs-backup-selection"
  iam_role_arn = aws_iam_role.backup_role.arn
  plan_id      = aws_backup_plan.gitlab.id

  selection_tag {
    type  = "STRINGEQUALS"
    key   = "kubernetes.io/created-for/pvc/name"
    value = "repo-data-gitlab-gitaly-0"
  }
}

# Role assumed by the AWS Backup service.
resource "aws_iam_role" "backup_role" {
  name = "aws-backup-role"
  assume_role_policy = jsonencode({
    Version = "2012-10-17",
    Statement = [{
      Effect = "Allow"
      Principal = {
        Service = "backup.amazonaws.com"
      }
      Action = "sts:AssumeRole"
    }]
  })
}

resource "aws_iam_role_policy_attachment" "backup_policy_attachment" {
  role       = aws_iam_role.backup_role.name
  policy_arn = "arn:aws:iam::aws:policy/service-role/AWSBackupServiceRolePolicyForBackup"
}
так как для создания бэкапа с помощью toolbox я использую диск, то во время установки
terraform apply --target helm_release.gitlab
спустя минут 5 установка подвиснет,
нужно запустить джобу:
kubectl create job --from=cronjob/gitlab-toolbox-backup gitlab-toolbox-backup-manual-$(date +%s) -n gitlab
чтоб PVC подсоединился и установка helm завершилась успешно.
рутовый пароль можно посмотреть в кластере
kubectl get secret -n gitlab gitlab-gitlab-initial-root-password -o yaml
создаём токен для гитлаб раннера
получаем токен, добавляем его в aws secret manager infra/gitlab переменная называется runner-token
и запускаем установку:
terraform apply --target helm_release.gitlab_runner
теперь нужно мигрировать данные с облачного гитлаба, для этого настраиваем миграцию:
лимиты выставляем по максимуму.
вот такая настройка нужна, чтобы после invite нового пользователя он мог заходить с помощью Google Auth без approve со стороны админки гитлаба.
со стороны облачного гитлаба создаём токен с указанными правами:
в нашем гитлабе создаём новую группу и указываем что будем её импортировать с помощью токена созданного в облачном гитлабе
указываем название нашей группы - лучше сделать чтоб она совпадала с облачной
запускаем и ждём. у меня это заняло около 5-7 часов - всё зависит сколько проектов и сколько данных надо тащить.
во время такого переноса переменные не переносятся, поэтому воспользуйтесь следующим скриптом:
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 |
#!/bin/bash
set -e
# Requirements: curl, jq
#
# Copies CI/CD variables (group-level and project-level) from a cloud GitLab
# group to the matching group on a self-hosted GitLab, recursing into subgroups.
#
# Fixes vs. the original version:
#  * credentials/domains/groups can be supplied via environment variables
#    (the inline values remain only as fallbacks — replace them with your own);
#  * the "does this group variable already exist?" check now tests `.key`:
#    the GitLab variables API does not return an `id` field, so the old
#    `jq -e '.id'` check never matched and existing variables were re-POSTed.

# === Настройки исходного (облачного) GitLab ===
GITLAB_CLOUD_TOKEN="${GITLAB_CLOUD_TOKEN:-glpat-RTc-AexVubiVn9q}"
GITLAB_CLOUD_DOMAIN="${GITLAB_CLOUD_DOMAIN:-gitlab.com}"
GITLAB_CLOUD_GROUP="${GITLAB_CLOUD_GROUP:-test-tech}"

# === Настройки целевого (self-hosted) GitLab ===
GITLAB_SELF_HOSTED_TOKEN="${GITLAB_SELF_HOSTED_TOKEN:-glpat-edgV1_QnYFSSkgu}"
GITLAB_SELF_HOSTED_DOMAIN="${GITLAB_SELF_HOSTED_DOMAIN:-gitlab.infra.test.tech}"
GITLAB_SELF_HOSTED_GROUP="${GITLAB_SELF_HOSTED_GROUP:-test-tech}"

# Timestamped logging.
log() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1"
}

# Percent-encode a string for safe use inside a URL path segment.
urlencode() {
    local string="${1}"
    local strlen=${#string}
    local encoded=""
    local pos c o
    for (( pos=0; pos<strlen; pos++ )); do
        c=${string:$pos:1}
        case "$c" in
            [-_.~a-zA-Z0-9]) o="$c" ;;
            *) printf -v o '%%%02X' "'$c" ;;
        esac
        encoded+="$o"
    done
    echo "$encoded"
}

###############################################################################
# process_group: copies the group's variables, then the variables of every
# project directly inside it, then recurses into its subgroups.
#   $1 = source group full path, $2 = target group full path
###############################################################################
process_group() {
    local source_group_path="$1"
    local target_group_path="$2"

    # Resolve the source group.
    local source_group_json
    source_group_json=$(curl -s --header "PRIVATE-TOKEN: $GITLAB_CLOUD_TOKEN" \
        "https://$GITLAB_CLOUD_DOMAIN/api/v4/groups/$(urlencode "$source_group_path")")
    local source_group_id
    source_group_id=$(echo "$source_group_json" | jq -r '.id')
    if [ -z "$source_group_id" ] || [ "$source_group_id" = "null" ]; then
        log "ОШИБКА: Группа-источник '$source_group_path' не найдена."
        return
    fi

    # Resolve the target group.
    local target_group_json
    target_group_json=$(curl -s --header "PRIVATE-TOKEN: $GITLAB_SELF_HOSTED_TOKEN" \
        "https://$GITLAB_SELF_HOSTED_DOMAIN/api/v4/groups/$(urlencode "$target_group_path")")
    local target_group_id
    target_group_id=$(echo "$target_group_json" | jq -r '.id')
    if [ -z "$target_group_id" ] || [ "$target_group_id" = "null" ]; then
        log "ОШИБКА: Целевая группа '$target_group_path' не найдена."
        return
    fi

    log "=== Обработка группы '$source_group_path' (ID: $source_group_id, целевая ID: $target_group_id) ==="

    # ---- Group-level variables ---------------------------------------------
    local group_vars_json
    group_vars_json=$(curl -s --header "PRIVATE-TOKEN: $GITLAB_CLOUD_TOKEN" \
        "https://$GITLAB_CLOUD_DOMAIN/api/v4/groups/$source_group_id/variables?per_page=100")
    local var_count
    var_count=$(echo "$group_vars_json" | jq '. | length')
    log "Найдено $var_count переменных в группе '$source_group_path'."

    echo "$group_vars_json" | jq -c '.[]' | while read -r var; do
        local key value protected masked environment_scope variable_type description source_raw raw_val payload
        key=$(echo "$var" | jq -r '.key')
        value=$(echo "$var" | jq -r '.value')
        protected=$(echo "$var" | jq '.protected // false')
        masked=$(echo "$var" | jq '.masked // false')
        environment_scope=$(echo "$var" | jq -r '.environment_scope // ""')
        variable_type=$(echo "$var" | jq -r '.variable_type')
        description=$(echo "$var" | jq -r '.description // ""')
        # raw as a JSON literal (true or false).
        source_raw=$(echo "$var" | jq '.raw // false')
        raw_val=$source_raw

        # Build the payload (raw is appended separately as a JSON boolean).
        local base_payload
        base_payload=$(jq -n \
            --arg key "$key" \
            --arg value "$value" \
            --argjson protected "$protected" \
            --argjson masked "$masked" \
            --arg environment_scope "$environment_scope" \
            --arg variable_type "$variable_type" \
            --arg description "$description" \
            '{key: $key, value: $value, protected: $protected, masked: $masked,
              environment_scope: $environment_scope, variable_type: $variable_type,
              description: $description}')
        payload=$(echo "$base_payload" | jq --argjson raw "$raw_val" '. + {raw: $raw}')

        # Does the variable already exist in the target group?
        # FIX: the variables API returns no `id` field — detect by `.key`
        # (same check the project section below already uses).
        local existing
        existing=$(curl -s --header "PRIVATE-TOKEN: $GITLAB_SELF_HOSTED_TOKEN" \
            "https://$GITLAB_SELF_HOSTED_DOMAIN/api/v4/groups/$(urlencode "$target_group_path")/variables/$(urlencode "$key")")
        if echo "$existing" | jq -e '.key' > /dev/null 2>&1; then
            local target_raw
            target_raw=$(echo "$existing" | jq '.raw')
            if [ "$target_raw" != "$raw_val" ]; then
                # `raw` cannot be changed with PUT — recreate the variable.
                log "Группа: Переменная '$key': отличается raw (целевой: $target_raw, желаемый: $raw_val). Удаляю и создаю заново."
                curl -s --request DELETE --header "PRIVATE-TOKEN: $GITLAB_SELF_HOSTED_TOKEN" \
                    "https://$GITLAB_SELF_HOSTED_DOMAIN/api/v4/groups/$(urlencode "$target_group_path")/variables/$(urlencode "$key")" > /dev/null
                log "Создаю переменную '$key' в группе '$target_group_path'…"
                curl -s --request POST --header "Content-Type: application/json" --header "PRIVATE-TOKEN: $GITLAB_SELF_HOSTED_TOKEN" \
                    -d "$payload" "https://$GITLAB_SELF_HOSTED_DOMAIN/api/v4/groups/$(urlencode "$target_group_path")/variables" > /dev/null
            else
                log "Обновляю переменную '$key' в группе '$target_group_path'…"
                curl -s --request PUT --header "Content-Type: application/json" --header "PRIVATE-TOKEN: $GITLAB_SELF_HOSTED_TOKEN" \
                    -d "$payload" "https://$GITLAB_SELF_HOSTED_DOMAIN/api/v4/groups/$(urlencode "$target_group_path")/variables/$(urlencode "$key")" > /dev/null
            fi
        else
            log "Создаю переменную '$key' в группе '$target_group_path'…"
            curl -s --request POST --header "Content-Type: application/json" --header "PRIVATE-TOKEN: $GITLAB_SELF_HOSTED_TOKEN" \
                -d "$payload" "https://$GITLAB_SELF_HOSTED_DOMAIN/api/v4/groups/$(urlencode "$target_group_path")/variables" > /dev/null
        fi
    done

    # ---- Project-level variables (paginated, accumulated in a temp file) ---
    local projects_json_all_file
    projects_json_all_file=$(mktemp)
    echo '[]' > "$projects_json_all_file"
    local page=1
    while : ; do
        log "Загрузка страницы проектов для группы '$source_group_path', страница $page"
        local projects_page_json
        projects_page_json=$(curl -s --header "PRIVATE-TOKEN: $GITLAB_CLOUD_TOKEN" \
            "https://$GITLAB_CLOUD_DOMAIN/api/v4/groups/$(urlencode "$source_group_path")/projects?include_subgroups=false&per_page=100&page=$page")
        if [[ "$projects_page_json" == "[]" ]]; then
            break
        fi
        local tmp_file
        tmp_file=$(mktemp)
        jq -s '.[0] + .[1]' "$projects_json_all_file" <(echo "$projects_page_json") > "$tmp_file"
        mv "$tmp_file" "$projects_json_all_file"
        # FIX: $((...)) instead of ((page++)) — ((page++)) evaluates to the
        # pre-increment value and would abort under `set -e` if it were 0.
        page=$((page + 1))
    done

    jq -c '.[]' "$projects_json_all_file" | while read -r proj; do
        local source_project_id path_with_namespace
        source_project_id=$(echo "$proj" | jq -r '.id')
        path_with_namespace=$(echo "$proj" | jq -r '.path_with_namespace')
        log "Проект: $path_with_namespace (ID: $source_project_id)"

        local project_vars
        project_vars=$(curl -s --header "PRIVATE-TOKEN: $GITLAB_CLOUD_TOKEN" \
            "https://$GITLAB_CLOUD_DOMAIN/api/v4/projects/$source_project_id/variables")
        local var_count
        var_count=$(echo "$project_vars" | jq '. | length')
        log "Найдено $var_count переменных в проекте '$path_with_namespace'."

        # Map the source project path onto the target group.
        local relative_path
        relative_path=$(echo "$path_with_namespace" | sed "s#^$GITLAB_CLOUD_GROUP/##")
        local target_project_full_path="$GITLAB_SELF_HOSTED_GROUP/$relative_path"
        local encoded_target_project
        encoded_target_project=$(echo "$target_project_full_path" | sed 's/\//%2F/g')
        local target_project_json
        target_project_json=$(curl -s --header "PRIVATE-TOKEN: $GITLAB_SELF_HOSTED_TOKEN" \
            "https://$GITLAB_SELF_HOSTED_DOMAIN/api/v4/projects/$encoded_target_project")
        local target_project_id
        target_project_id=$(echo "$target_project_json" | jq -r '.id')
        if [ -z "$target_project_id" ] || [ "$target_project_id" = "null" ]; then
            log "Целевой проект '$target_project_full_path' не найден. Пропускаю."
            log "--------------------------------------------"
            continue
        fi
        log "Целевой проект: $target_project_full_path (ID: $target_project_id)"

        echo "$project_vars" | jq -c '.[]' | while read -r var; do
            local key value protected masked environment_scope variable_type description source_raw payload
            key=$(echo "$var" | jq -r '.key')
            value=$(echo "$var" | jq -r '.value')
            protected=$(echo "$var" | jq -r '.protected')
            masked=$(echo "$var" | jq -r '.masked')
            environment_scope=$(echo "$var" | jq -r '.environment_scope')
            variable_type=$(echo "$var" | jq -r '.variable_type')
            description=$(echo "$var" | jq -r '.description // ""')
            source_raw=$(echo "$var" | jq -r '.raw // "false"')
            if [ "$source_raw" = "true" ]; then
                raw_val=true
            else
                raw_val=false
            fi

            local base_payload
            base_payload=$(jq -n \
                --arg key "$key" \
                --arg value "$value" \
                --argjson protected "$protected" \
                --argjson masked "$masked" \
                --arg environment_scope "$environment_scope" \
                --arg variable_type "$variable_type" \
                --arg description "$description" \
                '{key: $key, value: $value, protected: $protected, masked: $masked,
                  environment_scope: $environment_scope, variable_type: $variable_type,
                  description: $description}')
            payload=$(echo "$base_payload" | jq --argjson raw "$raw_val" '. + {raw: $raw}')

            local existing
            existing=$(curl -s --header "PRIVATE-TOKEN: $GITLAB_SELF_HOSTED_TOKEN" \
                "https://$GITLAB_SELF_HOSTED_DOMAIN/api/v4/projects/$target_project_id/variables/$(urlencode "$key")")
            if echo "$existing" | jq -e '.key' > /dev/null 2>&1; then
                local target_raw
                target_raw=$(echo "$existing" | jq -r '.raw')
                if [ "$target_raw" != "$raw_val" ]; then
                    log "Проект '$target_project_full_path': Переменная '$key': raw отличается (целевой: $target_raw, желаемый: $raw_val). Удаляю и создаю заново."
                    curl -s --request DELETE --header "PRIVATE-TOKEN: $GITLAB_SELF_HOSTED_TOKEN" \
                        "https://$GITLAB_SELF_HOSTED_DOMAIN/api/v4/projects/$target_project_id/variables/$(urlencode "$key")" > /dev/null
                    log "Создаю переменную '$key' в проекте '$target_project_full_path'…"
                    curl -s --request POST --header "Content-Type: application/json" --header "PRIVATE-TOKEN: $GITLAB_SELF_HOSTED_TOKEN" \
                        -d "$payload" "https://$GITLAB_SELF_HOSTED_DOMAIN/api/v4/projects/$target_project_id/variables" > /dev/null
                else
                    log "Обновляю переменную '$key' в проекте '$target_project_full_path'…"
                    curl -s --request PUT --header "Content-Type: application/json" --header "PRIVATE-TOKEN: $GITLAB_SELF_HOSTED_TOKEN" \
                        -d "$payload" "https://$GITLAB_SELF_HOSTED_DOMAIN/api/v4/projects/$target_project_id/variables/$(urlencode "$key")" > /dev/null
                fi
            else
                log "Создаю переменную '$key' в проекте '$target_project_full_path'…"
                curl -s --request POST --header "Content-Type: application/json" --header "PRIVATE-TOKEN: $GITLAB_SELF_HOSTED_TOKEN" \
                    -d "$payload" "https://$GITLAB_SELF_HOSTED_DOMAIN/api/v4/projects/$target_project_id/variables" > /dev/null
            fi
        done
        log "--------------------------------------------"
    done
    rm "$projects_json_all_file"

    # ---- Recurse into subgroups --------------------------------------------
    local subgroups_json
    subgroups_json=$(curl -s --header "PRIVATE-TOKEN: $GITLAB_CLOUD_TOKEN" \
        "https://$GITLAB_CLOUD_DOMAIN/api/v4/groups/$(urlencode "$source_group_path")/subgroups?per_page=100")
    local sub_count
    sub_count=$(echo "$subgroups_json" | jq '. | length')
    if [ "$sub_count" -gt 0 ]; then
        local i sub_path sub_id
        for (( i=0; i<sub_count; i++ )); do
            sub_path=$(echo "$subgroups_json" | jq -r ".[$i].full_path")
            sub_id=$(echo "$subgroups_json" | jq -r ".[$i].id")
            log "----> Рекурсивная обработка подгруппы '$sub_path' (ID: $sub_id) ---->"
            process_group "$sub_path" "$sub_path"
        done
    fi
}

# Start from the top-level groups.
process_group "$GITLAB_CLOUD_GROUP" "$GITLAB_SELF_HOSTED_GROUP"
log "Перенос переменных завершён."
в нём нужно указать
# === Настройки исходного (облачного) GitLab ===
GITLAB_CLOUD_TOKEN= токен в облачном гитлабе
GITLAB_CLOUD_DOMAIN="gitlab.com"
GITLAB_CLOUD_GROUP=группа в облачном гитлабе
# === Настройки целевого (self-hosted) GitLab ===
GITLAB_SELF_HOSTED_TOKEN= токен в нашем гитлабе
GITLAB_SELF_HOSTED_DOMAIN="gitlab.infra.test.tech"
GITLAB_SELF_HOSTED_GROUP= группа в нашем гитлабе
запускаем скрипт и переменные переносятся.
если вдруг кто то из разработчиков успел добавить новые коммиты в облачный гитлаб и нужно синхронизировать данные то используем следующий скрипт:
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 |
#!/bin/bash
# Требования: curl, jq, git
#
# Incrementally syncs commits from the cloud GitLab group into the already
# migrated self-hosted group: for every project it temporarily unprotects
# branches, fetches the cloud remote, merges each branch, pushes, then
# restores branch protection.
#
# Fixes vs. the original version:
#  * credentials/domains/groups can be supplied via environment variables
#    (the inline values remain only as fallbacks — replace them with your own);
#  * the debug log line no longer prints the remote URL with the embedded
#    access token (and the "g it" typo is gone).
# NOTE(review): the script intentionally has no `set -e` — a failed git step
# on one project lets the loop continue with the next one.

# === Настройки исходного (облачного) GitLab ===
GITLAB_CLOUD_TOKEN="${GITLAB_CLOUD_TOKEN:-glpat-Rn5-AexVubiVn9q}"
GITLAB_CLOUD_DOMAIN="${GITLAB_CLOUD_DOMAIN:-gitlab.com}"
GITLAB_CLOUD_GROUP="${GITLAB_CLOUD_GROUP:-test-tech}"

# === Настройки целевого (self-hosted) GitLab ===
GITLAB_SELF_HOSTED_TOKEN="${GITLAB_SELF_HOSTED_TOKEN:-glpat-dKV1_QnYFSSkgu}"
GITLAB_SELF_HOSTED_DOMAIN="${GITLAB_SELF_HOSTED_DOMAIN:-gitlab.infra.test.tech}"
GITLAB_SELF_HOSTED_GROUP="${GITLAB_SELF_HOSTED_GROUP:-test-tech}"

# Scratch directory for clones.
WORKDIR="./migration_tmp"
mkdir -p "$WORKDIR"
START_DIR=$(pwd)

log() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1"
}

# Percent-encode a string for safe use inside a URL path segment.
urlencode() {
    local string="${1}"
    local strlen=${#string}
    local encoded=""
    local pos c o
    for (( pos=0 ; pos<strlen ; pos++ )); do
        c=${string:$pos:1}
        case "$c" in
            [-_.~a-zA-Z0-9]) o="$c" ;;
            *) printf -v o '%%%02X' "'$c" ;;
        esac
        encoded+="${o}"
    done
    echo "$encoded"
}

# === Шаг 1: Получаем список всех проектов (с пагинацией) ===
log "Облачный GitLab: Получаю список проектов группы '$GITLAB_CLOUD_GROUP' (включая подгруппы) с $GITLAB_CLOUD_DOMAIN…"
ENCODED_GROUP=$(urlencode "$GITLAB_CLOUD_GROUP")
page=1
projects=()
while : ; do
    log "Загрузка страницы проектов номер $page"
    projects_json=$(curl -s --header "PRIVATE-TOKEN: $GITLAB_CLOUD_TOKEN" \
        "https://$GITLAB_CLOUD_DOMAIN/api/v4/groups/$ENCODED_GROUP/projects?include_subgroups=true&per_page=100&page=$page")
    if [[ "$projects_json" == "[]" ]]; then
        log "Больше нет проектов на странице $page. Завершаю получение списка."
        break
    fi
    # Store "path|||url" entries; "|||" cannot occur inside GitLab paths.
    while IFS=$'\t' read -r proj_path_ns proj_http_url; do
        projects+=("$proj_path_ns|||$proj_http_url")
    done < <(echo "$projects_json" | jq -r '.[] | [.path_with_namespace, .http_url_to_repo] | @tsv')
    log "Страница $page: Получено проектов: $(echo "$projects_json" | jq '. | length')"
    ((page++))
done
log "Всего найдено проектов: ${#projects[@]}"
log "============================================"

# === Шаг 2: Обрабатываем каждый проект ===
for entry in "${projects[@]}"; do
    SOURCE_PATH_NS="${entry%%|||*}"
    SOURCE_REPO_URL="${entry##*|||}"
    RELATIVE_PATH=${SOURCE_PATH_NS#"$GITLAB_CLOUD_GROUP/"}
    PROJECT_NAME=$(basename "$SOURCE_PATH_NS")
    log "Облачный GitLab: Начало обработки проекта: $SOURCE_PATH_NS"

    # Build the target repository URL, preserving any subgroup path.
    TARGET_REPO_URL="https://oauth2:$GITLAB_SELF_HOSTED_TOKEN@$GITLAB_SELF_HOSTED_DOMAIN/$GITLAB_SELF_HOSTED_GROUP"
    if [[ "$RELATIVE_PATH" != "$PROJECT_NAME" ]]; then
        TARGET_REPO_URL="$TARGET_REPO_URL/$(dirname "$RELATIVE_PATH")"
    fi
    TARGET_REPO_URL="$TARGET_REPO_URL/$PROJECT_NAME.git"
    SOURCE_REPO_URL_WITH_TOKEN=$(echo "$SOURCE_REPO_URL" | sed "s|https://|https://oauth2:$GITLAB_CLOUD_TOKEN@|")

    TARGET_PROJECT_PATH="$GITLAB_SELF_HOSTED_GROUP/$RELATIVE_PATH"
    ENCODED_TARGET_PROJECT=$(urlencode "$TARGET_PROJECT_PATH")
    TARGET_PROJECT_JSON=$(curl -s --header "PRIVATE-TOKEN: $GITLAB_SELF_HOSTED_TOKEN" \
        "https://$GITLAB_SELF_HOSTED_DOMAIN/api/v4/projects/$ENCODED_TARGET_PROJECT")
    PROJECT_ID=$(echo "$TARGET_PROJECT_JSON" | jq -r '.id')
    if [[ -z "$PROJECT_ID" || "$PROJECT_ID" == "null" ]]; then
        log "Self-hosted GitLab: Ошибка - не удалось получить информацию о проекте $TARGET_PROJECT_PATH. Пропускаю проект."
        log "============================================"
        continue
    fi

    # Temporarily lift branch protection so the merged history can be pushed.
    PROTECTED_BRANCHES=$(curl -s --header "PRIVATE-TOKEN: $GITLAB_SELF_HOSTED_TOKEN" \
        "https://$GITLAB_SELF_HOSTED_DOMAIN/api/v4/projects/$PROJECT_ID/protected_branches" | jq -r '.[].name')
    declare -a PROTECTED_ARRAY=()
    while IFS= read -r branch; do
        PROTECTED_ARRAY+=("$branch")
    done <<< "$PROTECTED_BRANCHES"
    for branch in "${PROTECTED_ARRAY[@]}"; do
        if [[ -n "$branch" ]]; then
            log "Self-hosted GitLab: Снимаю защиту с ветки: $branch"
            curl -s --request DELETE --header "PRIVATE-TOKEN: $GITLAB_SELF_HOSTED_TOKEN" \
                "https://$GITLAB_SELF_HOSTED_DOMAIN/api/v4/projects/$PROJECT_ID/protected_branches/$(urlencode "$branch")" > /dev/null
        fi
    done

    # Clone the self-hosted repo so local-only commits survive the merge.
    PROJECT_DIR="$WORKDIR/$PROJECT_NAME"
    rm -rf "$PROJECT_DIR"
    log "Self-hosted GitLab: Клонирую репозиторий $TARGET_REPO_URL…"
    git clone "$TARGET_REPO_URL" "$PROJECT_DIR"
    cd "$PROJECT_DIR" || continue

    # Identity for any merge commits this script creates.
    git config user.name "Migration Bot"
    git config user.email "migration-bot@test.tech"

    log "Облачный GitLab: Добавляю репозиторий как remote 'cloud'…"
    # FIX: log the token-free URL only — the real remote URL embeds the
    # access token and must not end up in logs.
    log "git remote add cloud $SOURCE_REPO_URL"
    git remote add cloud "${SOURCE_REPO_URL_WITH_TOKEN}"
    log "Облачный GitLab: Получаю изменения из remote 'cloud'…"
    git fetch cloud

    # Create local branches for cloud branches missing locally.
    for branch in $(git branch -r | grep 'cloud/' | sed 's#cloud/##'); do
        if ! git show-ref --verify --quiet "refs/heads/$branch"; then
            log "Создаю ветку $branch из облачного remote"
            git checkout -b "$branch" "cloud/$branch"
        fi
    done

    # Merge every local branch with its cloud counterpart.
    for branch in $(git for-each-ref --format='%(refname:short)' refs/heads/); do
        log "Обновляю ветку $branch…"
        git checkout "$branch"
        if git show-ref --verify --quiet "refs/remotes/cloud/$branch"; then
            git merge "cloud/$branch" --no-edit
        else
            log "Облачный репозиторий не содержит ветку $branch, пропускаю merge."
        fi
    done

    log "Удаляю remote 'cloud' перед push…"
    git remote remove cloud

    log "Self-hosted GitLab: Пушу обновлённые ветки…"
    git push origin --all
    git push origin --tags

    cd "$START_DIR"
    rm -rf "$PROJECT_DIR"

    # Restore protection. NOTE(review): only the branch name is restored —
    # custom push/merge access levels revert to the GitLab defaults.
    for branch in "${PROTECTED_ARRAY[@]}"; do
        if [[ -n "$branch" ]]; then
            log "Self-hosted GitLab: Восстанавливаю защиту для ветки: $branch"
            curl -s --request POST --header "PRIVATE-TOKEN: $GITLAB_SELF_HOSTED_TOKEN" \
                --data "name=$branch" \
                "https://$GITLAB_SELF_HOSTED_DOMAIN/api/v4/projects/$PROJECT_ID/protected_branches" > /dev/null
        fi
    done

    log "Обработка проекта $SOURCE_PATH_NS завершена."
    log "============================================"
done
log "Все проекты обработаны."
в нём нужно указать
# === Настройки исходного (облачного) GitLab ===
GITLAB_CLOUD_TOKEN= токен в облачном гитлабе
GITLAB_CLOUD_DOMAIN="gitlab.com"
GITLAB_CLOUD_GROUP=группа в облачном гитлабе
# === Настройки целевого (self-hosted) GitLab ===
GITLAB_SELF_HOSTED_TOKEN= токен в нашем гитлабе
GITLAB_SELF_HOSTED_DOMAIN="gitlab.infra.test.tech"
GITLAB_SELF_HOSTED_GROUP= группа в нашем гитлабе
этот скрипт будет перетаскивать данные с помощью fetch и merge, т.е. существующие данные затираться не будут
после переезда не забываем поправить для ВСЕХ репок кто может мёржить (merge) в репку
если у вас есть include из одного проекта в другой (например как у меня шаблоны хранятся в одном репозитории и я их подтягиваю в остальных проектах) возникнет ошибка вот такого вида:
Project `devops/gitlab-ci-templates` not found or access denied! Make sure any includes in the pipeline configuration are correctly defined.
её решаем следующим образом
каждого пользователя, который запускает CI/CD pipeline, добавляем в проект, который мы include, с минимальными правами Reporter — по-другому работать не будет.